[kvm-unit-tests] arm: fix crash when caches are off

Message ID 1410833175-25547-1-git-send-email-drjones@redhat.com (mailing list archive)
State New, archived

Commit Message

Andrew Jones Sept. 16, 2014, 2:06 a.m. UTC
We shouldn't try Load-Exclusive instructions unless we've enabled memory
management, as these instructions depend on the data cache unit's
coherency monitor. This patch adds a new setup boolean, initialized to false,
that is used to guard Load-Exclusive instructions. Eventually we'll add more
setup code that sets it true.

Note: This problem became visible on boards with Cortex-A7 processors. Testing
with Cortex-A15 didn't expose it, as those may have an external coherency
monitor that still allows the instructions to execute (on A7s we got data
aborts). However, even on A15s it's not clear from the spec whether the
instructions will behave as expected while caches are off, so we no longer
allow Load-Exclusive instructions without caches enabled on those processors
either.

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 lib/arm/asm/setup.h |  2 ++
 lib/arm/setup.c     |  1 +
 lib/arm/spinlock.c  | 10 ++++++++++
 3 files changed, 13 insertions(+)
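
For context on the commit message's note that setup code will eventually set
the new boolean to true, here is a rough sketch of the shape that path could
take (setup_mmu_and_caches() and mmu_enable() are hypothetical placeholders;
nothing like this exists in the tree at this point):

void setup_mmu_and_caches(void)
{
	/*
	 * Placeholder: build page tables and set SCTLR.M/SCTLR.C for
	 * this cpu (details omitted), then record that Load-Exclusive
	 * instructions are safe to use.
	 */
	mmu_enable();			/* hypothetical helper */
	mem_caches_enabled = true;
}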

Comments

Paolo Bonzini Sept. 16, 2014, 8:14 a.m. UTC | #1
Il 16/09/2014 04:06, Andrew Jones ha scritto:
> We shouldn't try Load-Exclusive instructions unless we've enabled memory
> management, as these instructions depend on the data cache unit's
> coherency monitor. This patch adds a new setup boolean, initialized to false,
> that is used to guard Load-Exclusive instructions. Eventually we'll add more
> setup code that sets it true.
> 
> Note: This problem became visible on boards with Cortex-A7 processors. Testing
> with Cortex-A15 didn't expose it, as those may have an external coherency
> monitor that still allows the instruction to execute (on A7s we got data
> aborts). Although even on A15's it's not clear from the spec if the
> instructions will behave as expected while caches are off, so we no longer
> allow Load-Exclusive instructions on those processors without caches enabled
> either.
> 
> Signed-off-by: Andrew Jones <drjones@redhat.com>
> ---
>  lib/arm/asm/setup.h |  2 ++
>  lib/arm/setup.c     |  1 +
>  lib/arm/spinlock.c  | 10 ++++++++++
>  3 files changed, 13 insertions(+)
> 
> diff --git a/lib/arm/asm/setup.h b/lib/arm/asm/setup.h
> index 21445ef2085fc..9c54c184e2866 100644
> --- a/lib/arm/asm/setup.h
> +++ b/lib/arm/asm/setup.h
> @@ -20,6 +20,8 @@ extern phys_addr_t __phys_offset, __phys_end;
>  #define PHYS_SIZE		(1ULL << PHYS_SHIFT)
>  #define PHYS_MASK		(PHYS_SIZE - 1ULL)
>  
> +extern bool mem_caches_enabled;
> +
>  #define L1_CACHE_SHIFT		6
>  #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
>  #define SMP_CACHE_BYTES		L1_CACHE_BYTES
> diff --git a/lib/arm/setup.c b/lib/arm/setup.c
> index 3941c9757dcb2..f7ed639c9d499 100644
> --- a/lib/arm/setup.c
> +++ b/lib/arm/setup.c
> @@ -25,6 +25,7 @@ u32 cpus[NR_CPUS] = { [0 ... NR_CPUS-1] = (~0UL) };
>  int nr_cpus;
>  
>  phys_addr_t __phys_offset, __phys_end;
> +bool mem_caches_enabled;
>  
>  static void cpu_set(int fdtnode __unused, u32 regval, void *info __unused)
>  {
> diff --git a/lib/arm/spinlock.c b/lib/arm/spinlock.c
> index d8a6d4c3383d6..43539c5e84062 100644
> --- a/lib/arm/spinlock.c
> +++ b/lib/arm/spinlock.c
> @@ -1,12 +1,22 @@
>  #include "libcflat.h"
>  #include "asm/spinlock.h"
>  #include "asm/barrier.h"
> +#include "asm/setup.h"
>  
>  void spin_lock(struct spinlock *lock)
>  {
>  	u32 val, fail;
>  
>  	dmb();
> +
> +	/*
> +	 * Without caches enabled Load-Exclusive instructions may fail.
> +	 * In that case we do nothing, and just hope the caller knows
> +	 * what they're doing.
> +	 */
> +	if (!mem_caches_enabled)
> +		return;

Should it at least write 1 to the spinlock?

Paolo

>  	do {
>  		asm volatile(
>  		"1:	ldrex	%0, [%2]\n"
> 
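
A minimal sketch of the best-effort variant Paolo is asking about (hypothetical,
not part of the posted patch; the lock word is assumed to be a field named 'v',
and the exclusive-access loop is elided):

void spin_lock(struct spinlock *lock)
{
	dmb();

	/*
	 * Best effort: Load-Exclusive may fail without caches, so mark
	 * the lock taken with a plain store instead of leaving it
	 * untouched.  This cannot exclude another CPU racing for the
	 * lock while caches are off.
	 */
	if (!mem_caches_enabled) {
		lock->v = 1;	/* 'v' is an assumed field name */
		return;
	}

	/* ... the existing ldrex/strex acquire loop follows here ... */
}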

Andrew Jones Sept. 16, 2014, 12:12 p.m. UTC | #2
----- Original Message -----
> Il 16/09/2014 04:06, Andrew Jones ha scritto:
> > We shouldn't try Load-Exclusive instructions unless we've enabled memory
> > management, as these instructions depend on the data cache unit's
> > coherency monitor. This patch adds a new setup boolean, initialized to
> > false,
> > that is used to guard Load-Exclusive instructions. Eventually we'll add
> > more
> > setup code that sets it true.
> > 
> > Note: This problem became visible on boards with Cortex-A7 processors.
> > Testing
> > with Cortex-A15 didn't expose it, as those may have an external coherency
> > monitor that still allows the instruction to execute (on A7s we got data
> > aborts). Although even on A15's it's not clear from the spec if the
> > instructions will behave as expected while caches are off, so we no longer
> > allow Load-Exclusive instructions on those processors without caches
> > enabled
> > either.
> > 
> > Signed-off-by: Andrew Jones <drjones@redhat.com>
> > ---
> >  lib/arm/asm/setup.h |  2 ++
> >  lib/arm/setup.c     |  1 +
> >  lib/arm/spinlock.c  | 10 ++++++++++
> >  3 files changed, 13 insertions(+)
> > 
> > diff --git a/lib/arm/asm/setup.h b/lib/arm/asm/setup.h
> > index 21445ef2085fc..9c54c184e2866 100644
> > --- a/lib/arm/asm/setup.h
> > +++ b/lib/arm/asm/setup.h
> > @@ -20,6 +20,8 @@ extern phys_addr_t __phys_offset, __phys_end;
> >  #define PHYS_SIZE		(1ULL << PHYS_SHIFT)
> >  #define PHYS_MASK		(PHYS_SIZE - 1ULL)
> >  
> > +extern bool mem_caches_enabled;
> > +
> >  #define L1_CACHE_SHIFT		6
> >  #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
> >  #define SMP_CACHE_BYTES		L1_CACHE_BYTES
> > diff --git a/lib/arm/setup.c b/lib/arm/setup.c
> > index 3941c9757dcb2..f7ed639c9d499 100644
> > --- a/lib/arm/setup.c
> > +++ b/lib/arm/setup.c
> > @@ -25,6 +25,7 @@ u32 cpus[NR_CPUS] = { [0 ... NR_CPUS-1] = (~0UL) };
> >  int nr_cpus;
> >  
> >  phys_addr_t __phys_offset, __phys_end;
> > +bool mem_caches_enabled;
> >  
> >  static void cpu_set(int fdtnode __unused, u32 regval, void *info __unused)
> >  {
> > diff --git a/lib/arm/spinlock.c b/lib/arm/spinlock.c
> > index d8a6d4c3383d6..43539c5e84062 100644
> > --- a/lib/arm/spinlock.c
> > +++ b/lib/arm/spinlock.c
> > @@ -1,12 +1,22 @@
> >  #include "libcflat.h"
> >  #include "asm/spinlock.h"
> >  #include "asm/barrier.h"
> > +#include "asm/setup.h"
> >  
> >  void spin_lock(struct spinlock *lock)
> >  {
> >  	u32 val, fail;
> >  
> >  	dmb();
> > +
> > +	/*
> > +	 * Without caches enabled Load-Exclusive instructions may fail.
> > +	 * In that case we do nothing, and just hope the caller knows
> > +	 * what they're doing.
> > +	 */
> > +	if (!mem_caches_enabled)
> > +		return;
> 
> Should it at least write 1 to the spinlock?

I thought about that. So on one hand we might get a somewhat functional
synchronization mechanism, which may be enough for some unit test that
doesn't enable caches, but still needs it. On the other hand, we know
it's broken, so we don't really want any unit tests that need synchronization
and don't enable caches. I chose not to write a 1 in the hope that, if
a unit test introduces a race, that race will be easier to expose
and fix. That said, I'm not strongly biased, as we'd still have a race,
which may or may not be easy to expose, either way. So if the majority
prefers a best effort approach, then I'll spin a v2.

drew

> 
> Paolo
> 
> >  	do {
> >  		asm volatile(
> >  		"1:	ldrex	%0, [%2]\n"
> > 
> 
> 
Paolo Bonzini Sept. 16, 2014, 12:28 p.m. UTC | #3
Il 16/09/2014 14:12, Andrew Jones ha scritto:
>> > Should it at least write 1 to the spinlock?
> I thought about that. So on one hand we might get a somewhat functional
> synchronization mechanism, which may be enough for some unit test that
> doesn't enable caches, but still needs it. On the other hand, we know
> its broken, so we don't really want any unit tests that need synchronization
> and don't enable caches. I chose to not write a 1 in the hope that if
> a unit test introduces a race, that that race will be easier to expose
> and fix. That said, I'm not strongly biased, as we'd still have a race,
> which may or may not be easy to expose, either way. So if the majority
> prefers a best effort approach, then I'll spin a v2.

The case I was thinking about was something like

    spin_lock()
    enable caches
    start other processors
    spin_unlock()

I'm not sure if it makes sense though. :)

Paolo
Andrew Jones Sept. 16, 2014, 12:43 p.m. UTC | #4
----- Original Message -----
> Il 16/09/2014 14:12, Andrew Jones ha scritto:
> >> > Should it at least write 1 to the spinlock?
> > I thought about that. So on one hand we might get a somewhat functional
> > synchronization mechanism, which may be enough for some unit test that
> > doesn't enable caches, but still needs it. On the other hand, we know
> > its broken, so we don't really want any unit tests that need
> > synchronization
> > and don't enable caches. I chose to not write a 1 in the hope that if
> > a unit test introduces a race, that that race will be easier to expose
> > and fix. That said, I'm not strongly biased, as we'd still have a race,
> > which may or may not be easy to expose, either way. So if the majority
> > prefers a best effort approach, then I'll spin a v2.
> 
> The case I was thinking about was something like
> 
>     spin_lock()
>     enable caches
>     start other processors
>     spin_unlock()
> 
> I'm not sure if it makes sense though. :)

I don't think we need to worry about this case. AFAIU, enabling the
caches for a particular cpu shouldn't require any synchronization.
So we should be able to do

    enable caches
    spin_lock
    start other processors
    spin_unlock

drew

> 
> Paolo
> 
Paolo Bonzini Sept. 16, 2014, 12:47 p.m. UTC | #5
Il 16/09/2014 14:43, Andrew Jones ha scritto:
> I don't think we need to worry about this case. AFAIU, enabling the
> caches for a particular cpu shouldn't require any synchronization.
> So we should be able to do
> 
>     enable caches
>     spin_lock
>     start other processors
>     spin_unlock

Ok, I'll test and apply your patch then.

Once you change the code to enable caches, please consider making
spin_lock hang if it's called with caches disabled.

Paolo
Andrew Jones Sept. 16, 2014, 12:48 p.m. UTC | #6
----- Original Message -----
> 
> 
> ----- Original Message -----
> > Il 16/09/2014 14:12, Andrew Jones ha scritto:
> > >> > Should it at least write 1 to the spinlock?
> > > I thought about that. So on one hand we might get a somewhat functional
> > > synchronization mechanism, which may be enough for some unit test that
> > > doesn't enable caches, but still needs it. On the other hand, we know
> > > its broken, so we don't really want any unit tests that need
> > > synchronization
> > > and don't enable caches. I chose to not write a 1 in the hope that if
> > > a unit test introduces a race, that that race will be easier to expose
> > > and fix. That said, I'm not strongly biased, as we'd still have a race,
> > > which may or may not be easy to expose, either way. So if the majority
> > > prefers a best effort approach, then I'll spin a v2.
> > 
> > The case I was thinking about was something like
> > 
> >     spin_lock()
> >     enable caches
> >     start other processors
> >     spin_unlock()
> > 
> > I'm not sure if it makes sense though. :)
> 
> I don't think we need to worry about this case. AFAIU, enabling the
> caches for a particular cpu shouldn't require any synchronization.
> So we should be able to do
> 
>     enable caches
>     spin_lock
>     start other processors
>     spin_unlock
> 

Oh drat. I just introduced an issue with enabling caches without working
spin locks: this new boolean will need to be per cpu.

I'll send a v2.



> drew
> 
> > 
> > Paolo
> > 
> 
Andrew Jones Sept. 16, 2014, 12:51 p.m. UTC | #7
----- Original Message -----
> Il 16/09/2014 14:43, Andrew Jones ha scritto:
> > I don't think we need to worry about this case. AFAIU, enabling the
> > caches for a particular cpu shouldn't require any synchronization.
> > So we should be able to do
> > 
> >     enable caches
> >     spin_lock
> >     start other processors
> >     spin_unlock
> 
> Ok, I'll test and apply your patch then.

Actually, yeah, please apply now in order to get A7 boards working.
I'll do a follow-on patch to fix the case above (which will require
deciding how to handle per cpu data).

drew

> 
> Once you change the code to enable caches, please consider hanging on
> spin_lock with caches disabled.
> 
> Paolo
> 
Andrew Jones Sept. 16, 2014, 12:57 p.m. UTC | #8
----- Original Message -----
> Il 16/09/2014 14:43, Andrew Jones ha scritto:
> > I don't think we need to worry about this case. AFAIU, enabling the
> > caches for a particular cpu shouldn't require any synchronization.
> > So we should be able to do
> > 
> >     enable caches
> >     spin_lock
> >     start other processors
> >     spin_unlock
> 
> Ok, I'll test and apply your patch then.
> 
> Once you change the code to enable caches, please consider hanging on
> spin_lock with caches disabled.

Unfortunately I can't do that without changing spin_lock into a wrapper
function. Early setup code calls functions that use spin_locks, e.g.
puts(), and we won't want to move the cache enablement into early setup
code, as that should be left for unit tests to turn on or off as they wish.
Thus we either need to be able to change the spin_lock implementation
dynamically, or just leave the test/return as is.

drew

> 
> Paolo
> 
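
A sketch of what switching the spin_lock implementation dynamically could look
like (all names here are hypothetical; nothing like this was posted):

/* Pre-MMU fallback: exclusives are unreliable, so do nothing. */
static void spin_lock_nop(struct spinlock *lock __unused)
{
}

static void spin_lock_ldrex(struct spinlock *lock)
{
	/* the existing ldrex/strex acquire loop would live here */
}

static void (*spin_lock_impl)(struct spinlock *) = spin_lock_nop;

void spin_lock(struct spinlock *lock)
{
	dmb();
	spin_lock_impl(lock);
}

/* Called once from the (future) cache/MMU enable path. */
void spinlock_switch_to_ldrex(void)
{
	spin_lock_impl = spin_lock_ldrex;
}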
Andrew Jones Sept. 16, 2014, 2:38 p.m. UTC | #9
----- Original Message -----
> 
> 
> ----- Original Message -----
> > Il 16/09/2014 14:43, Andrew Jones ha scritto:
> > > I don't think we need to worry about this case. AFAIU, enabling the
> > > caches for a particular cpu shouldn't require any synchronization.
> > > So we should be able to do
> > > 
> > >     enable caches
> > >     spin_lock
> > >     start other processors
> > >     spin_unlock
> > 
> > Ok, I'll test and apply your patch then.
> 
> Actually, yeah, please apply now in order to get A7 boards working.
> I'll do a follow-on patch to fix the case above (which will require
> deciding how to hand per cpu data).

Post coffee, I don't see why I shouldn't just use SCTLR.C as my
boolean, which is of course per cpu, and means the same thing,
i.e. caches enabled or not. I'll send a v2 that drops
mem_caches_enabled, and modifies the logic of the spin_lock asm.

drew

> 
> drew
> 
> > 
> > Once you change the code to enable caches, please consider hanging on
> > spin_lock with caches disabled.
> > 
> > Paolo
> 
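
A sketch of the SCTLR.C check being floated here (Andrew drops the idea in a
later message because CP15 reads aren't available from user mode):

static inline bool dcache_enabled(void)
{
	u32 sctlr;

	/* SCTLR is CP15 c1, c0, 0; the data cache enable (C) bit is bit 2. */
	asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (sctlr));
	return sctlr & (1 << 2);
}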
Andrew Jones Sept. 16, 2014, 6:04 p.m. UTC | #10
On Tue, Sep 16, 2014 at 10:38:11AM -0400, Andrew Jones wrote:
> 
> 
> ----- Original Message -----
> > 
> > 
> > ----- Original Message -----
> > > Il 16/09/2014 14:43, Andrew Jones ha scritto:
> > > > I don't think we need to worry about this case. AFAIU, enabling the
> > > > caches for a particular cpu shouldn't require any synchronization.
> > > > So we should be able to do
> > > > 
> > > >     enable caches
> > > >     spin_lock
> > > >     start other processors
> > > >     spin_unlock
> > > 
> > > Ok, I'll test and apply your patch then.
> > 
> > Actually, yeah, please apply now in order to get A7 boards working.
> > I'll do a follow-on patch to fix the case above (which will require
> > deciding how to hand per cpu data).
> 
> Post coffee, I don't see why I shouldn't just use SCTLR.C as my
> boolean, which is of course per cpu, and means the same thing,
> i.e. caches enabled or not. I'll send a v2 that drops
> mem_caches_enabled, and modifies the logic of the spin_lock asm.

Scratch this idea. It breaks the use of spin_lock from usr mode.
I think plan B is the best, which is: apply this patch, and then I'll
make mem_caches_enabled per cpu later, once we support per cpu data.

> > 
> > > 
> > > Once you change the code to enable caches, please consider hanging on
> > > spin_lock with caches disabled.
> > > 
> > > Paolo
> > 
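
A sketch of the per-cpu form the flag would need under plan B (hypothetical;
it assumes an smp_processor_id()-style helper, which the library doesn't have
yet at this point):

static bool cpu_caches_enabled[NR_CPUS];

static inline bool mem_caches_enabled_this_cpu(void)
{
	return cpu_caches_enabled[smp_processor_id()];	/* helper assumed */
}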
Paolo Bonzini Sept. 18, 2014, 10:36 a.m. UTC | #11
Il 16/09/2014 04:06, Andrew Jones ha scritto:
> We shouldn't try Load-Exclusive instructions unless we've enabled memory
> management, as these instructions depend on the data cache unit's
> coherency monitor. This patch adds a new setup boolean, initialized to false,
> that is used to guard Load-Exclusive instructions. Eventually we'll add more
> setup code that sets it true.
> 
> Note: This problem became visible on boards with Cortex-A7 processors. Testing
> with Cortex-A15 didn't expose it, as those may have an external coherency
> monitor that still allows the instruction to execute (on A7s we got data
> aborts). Although even on A15's it's not clear from the spec if the
> instructions will behave as expected while caches are off, so we no longer
> allow Load-Exclusive instructions on those processors without caches enabled
> either.
> 
> Signed-off-by: Andrew Jones <drjones@redhat.com>
> ---
>  lib/arm/asm/setup.h |  2 ++
>  lib/arm/setup.c     |  1 +
>  lib/arm/spinlock.c  | 10 ++++++++++
>  3 files changed, 13 insertions(+)
> 
> diff --git a/lib/arm/asm/setup.h b/lib/arm/asm/setup.h
> index 21445ef2085fc..9c54c184e2866 100644
> --- a/lib/arm/asm/setup.h
> +++ b/lib/arm/asm/setup.h
> @@ -20,6 +20,8 @@ extern phys_addr_t __phys_offset, __phys_end;
>  #define PHYS_SIZE		(1ULL << PHYS_SHIFT)
>  #define PHYS_MASK		(PHYS_SIZE - 1ULL)
>  
> +extern bool mem_caches_enabled;
> +
>  #define L1_CACHE_SHIFT		6
>  #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
>  #define SMP_CACHE_BYTES		L1_CACHE_BYTES
> diff --git a/lib/arm/setup.c b/lib/arm/setup.c
> index 3941c9757dcb2..f7ed639c9d499 100644
> --- a/lib/arm/setup.c
> +++ b/lib/arm/setup.c
> @@ -25,6 +25,7 @@ u32 cpus[NR_CPUS] = { [0 ... NR_CPUS-1] = (~0UL) };
>  int nr_cpus;
>  
>  phys_addr_t __phys_offset, __phys_end;
> +bool mem_caches_enabled;
>  
>  static void cpu_set(int fdtnode __unused, u32 regval, void *info __unused)
>  {
> diff --git a/lib/arm/spinlock.c b/lib/arm/spinlock.c
> index d8a6d4c3383d6..43539c5e84062 100644
> --- a/lib/arm/spinlock.c
> +++ b/lib/arm/spinlock.c
> @@ -1,12 +1,22 @@
>  #include "libcflat.h"
>  #include "asm/spinlock.h"
>  #include "asm/barrier.h"
> +#include "asm/setup.h"
>  
>  void spin_lock(struct spinlock *lock)
>  {
>  	u32 val, fail;
>  
>  	dmb();
> +
> +	/*
> +	 * Without caches enabled Load-Exclusive instructions may fail.
> +	 * In that case we do nothing, and just hope the caller knows
> +	 * what they're doing.
> +	 */
> +	if (!mem_caches_enabled)
> +		return;
> +
>  	do {
>  		asm volatile(
>  		"1:	ldrex	%0, [%2]\n"
> 

Tested and pushed.  Thanks!

Paolo
Christoffer Dall Sept. 26, 2014, 7:51 a.m. UTC | #12
On Tue, Sep 16, 2014 at 08:57:31AM -0400, Andrew Jones wrote:
> 
> 
> ----- Original Message -----
> > Il 16/09/2014 14:43, Andrew Jones ha scritto:
> > > I don't think we need to worry about this case. AFAIU, enabling the
> > > caches for a particular cpu shouldn't require any synchronization.
> > > So we should be able to do
> > > 
> > >     enable caches
> > >     spin_lock
> > >     start other processors
> > >     spin_unlock
> > 
> > Ok, I'll test and apply your patch then.
> > 
> > Once you change the code to enable caches, please consider hanging on
> > spin_lock with caches disabled.
> 
> Unfortunately I can't do that without changing spin_lock into a wrapper
> function. Early setup code calls functions that use spin_locks, e.g.
> puts(), and we won't want to move the cache enablement into early setup
> code, as that should be left for unit tests to turn on off as they wish.
> Thus we either need to be able to change the spin_lock implementation
> dynamically, or just leave the test/return as is.
> 
My take on this whole thing is that we're doing something fundamentally
wrong.  I think what we should do is to always enable the MMU for
running actual tests, bringing up multiple CPUs etc.  We could have an
early_printf() that doesn't use the spinlock.  I think this will just be
a more stable setup.

Do we have clear ideas of which kinds of tests it would make sense to
run without the MMU turned on?  If we can be more concrete on this
subject, perhaps a special path (or build) that doesn't enable the MMU
for running the aforementioned test cases could be added.

-Christoffer
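
A sketch of the spinlock-free early output Christoffer mentions (hypothetical;
the UART base address is a placeholder, the real one would come from the
device tree):

#define EARLY_UART_BASE	((volatile u32 *)0x09000000UL)	/* placeholder */

void early_puts(const char *s)
{
	/* write straight to the UART data register, no locking */
	while (*s)
		*EARLY_UART_BASE = *s++;
}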
Andrew Jones Oct. 30, 2014, 3:59 p.m. UTC | #13
On Fri, Sep 26, 2014 at 09:51:15AM +0200, Christoffer Dall wrote:
> On Tue, Sep 16, 2014 at 08:57:31AM -0400, Andrew Jones wrote:
> > 
> > 
> > ----- Original Message -----
> > > Il 16/09/2014 14:43, Andrew Jones ha scritto:
> > > > I don't think we need to worry about this case. AFAIU, enabling the
> > > > caches for a particular cpu shouldn't require any synchronization.
> > > > So we should be able to do
> > > > 
> > > >     enable caches
> > > >     spin_lock
> > > >     start other processors
> > > >     spin_unlock
> > > 
> > > Ok, I'll test and apply your patch then.
> > > 
> > > Once you change the code to enable caches, please consider hanging on
> > > spin_lock with caches disabled.
> > 
> > Unfortunately I can't do that without changing spin_lock into a wrapper
> > function. Early setup code calls functions that use spin_locks, e.g.
> > puts(), and we won't want to move the cache enablement into early setup
> > code, as that should be left for unit tests to turn on off as they wish.
> > Thus we either need to be able to change the spin_lock implementation
> > dynamically, or just leave the test/return as is.
> > 
> My take on this whole thing is that we're doing something fundamentally
> wrong.  I think what we should do is to always enable the MMU for
> running actual tests, bringing up multiple CPUs etc.  We could have an
> early_printf() that doesn't use the spinlock.  I think this will just be
> a more stable setup.
> 
> Do we have clear ideas of which kinds of tests it would make sense to
> run without the MMU turned on?  If we can be more concrete on this
> subject, perhaps a special path (or build) that doesn't enable the MMU
> for running the aforementioned test cases could be added.
> 

Finally carved out some kvm-unit-tests time again and fixed this properly.
A series is on the list: "[kvm-unit-tests PATCH 0/6] arm: enable MMU".

drew
Patch

diff --git a/lib/arm/asm/setup.h b/lib/arm/asm/setup.h
index 21445ef2085fc..9c54c184e2866 100644
--- a/lib/arm/asm/setup.h
+++ b/lib/arm/asm/setup.h
@@ -20,6 +20,8 @@  extern phys_addr_t __phys_offset, __phys_end;
 #define PHYS_SIZE		(1ULL << PHYS_SHIFT)
 #define PHYS_MASK		(PHYS_SIZE - 1ULL)
 
+extern bool mem_caches_enabled;
+
 #define L1_CACHE_SHIFT		6
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES		L1_CACHE_BYTES
diff --git a/lib/arm/setup.c b/lib/arm/setup.c
index 3941c9757dcb2..f7ed639c9d499 100644
--- a/lib/arm/setup.c
+++ b/lib/arm/setup.c
@@ -25,6 +25,7 @@  u32 cpus[NR_CPUS] = { [0 ... NR_CPUS-1] = (~0UL) };
 int nr_cpus;
 
 phys_addr_t __phys_offset, __phys_end;
+bool mem_caches_enabled;
 
 static void cpu_set(int fdtnode __unused, u32 regval, void *info __unused)
 {
diff --git a/lib/arm/spinlock.c b/lib/arm/spinlock.c
index d8a6d4c3383d6..43539c5e84062 100644
--- a/lib/arm/spinlock.c
+++ b/lib/arm/spinlock.c
@@ -1,12 +1,22 @@ 
 #include "libcflat.h"
 #include "asm/spinlock.h"
 #include "asm/barrier.h"
+#include "asm/setup.h"
 
 void spin_lock(struct spinlock *lock)
 {
 	u32 val, fail;
 
 	dmb();
+
+	/*
+	 * Without caches enabled Load-Exclusive instructions may fail.
+	 * In that case we do nothing, and just hope the caller knows
+	 * what they're doing.
+	 */
+	if (!mem_caches_enabled)
+		return;
+
 	do {
 		asm volatile(
 		"1:	ldrex	%0, [%2]\n"