
[v2] arm64: KVM: Invoke compute_layout() before alternatives are applied

Message ID 20191128195805.c2pryug4ohmcoqwd@linutronix.de (mailing list archive)
State Mainlined
Commit 0492747c72a3db0425a234abafb763c5b28c845d
Series [v2] arm64: KVM: Invoke compute_layout() before alternatives are applied

Commit Message

Sebastian Andrzej Siewior Nov. 28, 2019, 7:58 p.m. UTC
compute_layout() is invoked as part of an alternative fixup under
stop_machine(). This function invokes get_random_long(), which on -RT
acquires a sleeping lock that cannot be taken in this atomic context.

Rename compute_layout() to kvm_compute_layout() and invoke it before
stop_machine() applies the alternatives. Mark kvm_compute_layout() as
__init because its caller already is, so the code can be discarded
after boot.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---

v1…v2: Move code as suggested by James Morse. Didn't add his Reviewed-by
       (as suggested) because I'm not sure that I got everything
       correct.

 arch/arm64/include/asm/kvm_mmu.h | 1 +
 arch/arm64/kernel/smp.c          | 4 ++++
 arch/arm64/kvm/va_layout.c       | 8 +-------
 3 files changed, 6 insertions(+), 7 deletions(-)
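
For context, a minimal sketch of the resulting call flow (simplified;
the bodies below are illustrative stand-ins, not the real arm64
implementations):

/* Before: computed lazily inside the stop_machine() patch callback. */
void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst)
{
	if (!has_vhe() && !va_mask)
		compute_layout();	/* get_random_long(): sleeping lock on -RT */
	/* ... patch the instructions ... */
}

/* After: computed once from the preemptible SMP bring-up path, before
 * the alternatives are applied under stop_machine().
 */
static void __init hyp_mode_check(void)
{
	/* ... report the exception level the CPUs started at ... */
	if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
		kvm_compute_layout();	/* safe: this context may sleep */
}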

Comments

Marc Zyngier Dec. 6, 2019, 11:22 a.m. UTC | #1
On 2019-11-28 19:58, Sebastian Andrzej Siewior wrote:
> compute_layout() is invoked as part of an alternative fixup under
> stop_machine(). This function invokes get_random_long(), which on -RT
> acquires a sleeping lock that cannot be taken in this atomic context.
>
> Rename compute_layout() to kvm_compute_layout() and invoke it before
> stop_machine() applies the alternatives. Mark kvm_compute_layout() as
> __init because its caller already is, so the code can be discarded
> after boot.
>
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

Acked-by: Marc Zyngier <maz@kernel.org>

I think this should go via the arm64 tree, so I'll let Catalin
and Will pick this up (unless they think otherwise).

Thanks,

         M.
Catalin Marinas Dec. 6, 2019, 11:46 a.m. UTC | #2
On Fri, Dec 06, 2019 at 11:22:02AM +0000, Marc Zyngier wrote:
> On 2019-11-28 19:58, Sebastian Andrzej Siewior wrote:
> > compute_layout() is invoked as part of an alternative fixup under
> > stop_machine(). This function invokes get_random_long(), which on -RT
> > acquires a sleeping lock that cannot be taken in this atomic context.
> > 
> > Rename compute_layout() to kvm_compute_layout() and invoke it before
> > stop_machine() applies the alternatives. Mark kvm_compute_layout() as
> > __init because its caller already is, so the code can be discarded
> > after boot.
> > 
> > Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> 
> Acked-by: Marc Zyngier <maz@kernel.org>
> 
> I think this should go via the arm64 tree, so I'll let Catalin
> and Will pick this up (unless they think otherwise).

I can pick this up. Thanks.
Catalin Marinas Dec. 6, 2019, 11:57 a.m. UTC | #3
On Thu, Nov 28, 2019 at 08:58:05PM +0100, Sebastian Andrzej Siewior wrote:
> @@ -408,6 +410,8 @@ static void __init hyp_mode_check(void)
>  			   "CPU: CPUs started in inconsistent modes");
>  	else
>  		pr_info("CPU: All CPU(s) started at EL1\n");
> +	if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
> +		kvm_compute_layout();
>  }

It looks like we call this unconditionally here even if the kernel was
booted at EL1.

>  void __init smp_cpus_done(unsigned int max_cpus)
> diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
> index 2cf7d4b606c38..dab1fea4752aa 100644
> --- a/arch/arm64/kvm/va_layout.c
> +++ b/arch/arm64/kvm/va_layout.c
> @@ -22,7 +22,7 @@ static u8 tag_lsb;
>  static u64 tag_val;
>  static u64 va_mask;
>  
> -static void compute_layout(void)
> +__init void kvm_compute_layout(void)
>  {
>  	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
>  	u64 hyp_va_msb;
> @@ -110,9 +110,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
>  
>  	BUG_ON(nr_inst != 5);
>  
> -	if (!has_vhe() && !va_mask)
> -		compute_layout();
> -
>  	for (i = 0; i < nr_inst; i++) {
>  		u32 rd, rn, insn, oinsn;
>  
> @@ -156,9 +153,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
>  		return;
>  	}
>  
> -	if (!va_mask)
> -		compute_layout();

And here we had a few more checks.

Maybe it's still correct, but I'm asking anyway.
Marc Zyngier Dec. 6, 2019, 12:12 p.m. UTC | #4
On 2019-12-06 11:57, Catalin Marinas wrote:
> On Thu, Nov 28, 2019 at 08:58:05PM +0100, Sebastian Andrzej Siewior wrote:
>> @@ -408,6 +410,8 @@ static void __init hyp_mode_check(void)
>>  			   "CPU: CPUs started in inconsistent modes");
>>  	else
>>  		pr_info("CPU: All CPU(s) started at EL1\n");
>> +	if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
>> +		kvm_compute_layout();
>>  }
>
> It looks like we call this unconditionally here even if the kernel was
> booted at EL1.

It has always been the case. My motivation was to be able to debug
this in a guest, because doing this on the host is... painful! ;-)

Feel free to condition it on !EL1 though.

>
>>  void __init smp_cpus_done(unsigned int max_cpus)
>> diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
>> index 2cf7d4b606c38..dab1fea4752aa 100644
>> --- a/arch/arm64/kvm/va_layout.c
>> +++ b/arch/arm64/kvm/va_layout.c
>> @@ -22,7 +22,7 @@ static u8 tag_lsb;
>>  static u64 tag_val;
>>  static u64 va_mask;
>>
>> -static void compute_layout(void)
>> +__init void kvm_compute_layout(void)
>>  {
>>  	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
>>  	u64 hyp_va_msb;
>> @@ -110,9 +110,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
>>
>>  	BUG_ON(nr_inst != 5);
>>
>> -	if (!has_vhe() && !va_mask)
>> -		compute_layout();
>> -
>>  	for (i = 0; i < nr_inst; i++) {
>>  		u32 rd, rn, insn, oinsn;
>>
>> @@ -156,9 +153,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
>>  		return;
>>  	}
>>
>> -	if (!va_mask)
>> -		compute_layout();
>
> And here we had a few more checks.
>
> Maybe it's still correct but asking anyway.

It should be correct. These checks were there to ensure that we only
computed the layout once, and this now happens by construction (calling
compute_layout from a single location instead of doing it from the
patch callbacks).

Thanks,

        M.
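
To illustrate the point, a minimal sketch (not code from the patch) of
the old guarded, lazy initialisation versus the new single early call:

/* Old: one-time initialisation, guarded at each patch callback. */
if (!va_mask)
	compute_layout();	/* whichever callback runs first does the work */

/* New: one unconditional call from a single __init site; the "only
 * once" property now holds by construction, so the guards can go.
 */
kvm_compute_layout();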
Catalin Marinas Dec. 6, 2019, 12:21 p.m. UTC | #5
On Fri, Dec 06, 2019 at 12:12:10PM +0000, Marc Zyngier wrote:
> On 2019-12-06 11:57, Catalin Marinas wrote:
> > On Thu, Nov 28, 2019 at 08:58:05PM +0100, Sebastian Andrzej Siewior wrote:
> > > @@ -408,6 +410,8 @@ static void __init hyp_mode_check(void)
> > >  			   "CPU: CPUs started in inconsistent modes");
> > >  	else
> > >  		pr_info("CPU: All CPU(s) started at EL1\n");
> > > +	if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
> > > +		kvm_compute_layout();
> > >  }
> > 
> > It looks like we call this unconditionally here even if the kernel was
> > booted at EL1.
> 
> It has always been the case. My motivation was to be able to debug
> this in a guest, because doing this on the host is... painful! ;-)
> 
> Feel free to condition it on !EL1 though.

I'll leave the patch as is. Thanks for confirming.
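
If one did want to restrict the call to kernels booted at EL2, a
minimal sketch of Marc's suggestion (assuming the existing
is_hyp_mode_available() helper; this variant is not part of the merged
patch) could look like:

	if (IS_ENABLED(CONFIG_KVM_ARM_HOST) && is_hyp_mode_available())
		kvm_compute_layout();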

Patch

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index befe37d4bc0e5..53d846f1bfe70 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -91,6 +91,7 @@ alternative_cb_end
 
 void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
+void kvm_compute_layout(void);
 
 static inline unsigned long __kern_hyp_va(unsigned long v)
 {
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index dc9fe879c2793..02d41eae3da86 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -31,6 +31,7 @@
 #include <linux/of.h>
 #include <linux/irq_work.h>
 #include <linux/kexec.h>
+#include <linux/kvm_host.h>
 
 #include <asm/alternative.h>
 #include <asm/atomic.h>
@@ -39,6 +40,7 @@
 #include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/daifflags.h>
+#include <asm/kvm_mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/numa.h>
 #include <asm/pgtable.h>
@@ -408,6 +410,8 @@ static void __init hyp_mode_check(void)
 			   "CPU: CPUs started in inconsistent modes");
 	else
 		pr_info("CPU: All CPU(s) started at EL1\n");
+	if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
+		kvm_compute_layout();
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 2cf7d4b606c38..dab1fea4752aa 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -22,7 +22,7 @@ static u8 tag_lsb;
 static u64 tag_val;
 static u64 va_mask;
 
-static void compute_layout(void)
+__init void kvm_compute_layout(void)
 {
 	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
 	u64 hyp_va_msb;
@@ -110,9 +110,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 
 	BUG_ON(nr_inst != 5);
 
-	if (!has_vhe() && !va_mask)
-		compute_layout();
-
 	for (i = 0; i < nr_inst; i++) {
 		u32 rd, rn, insn, oinsn;
 
@@ -156,9 +153,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 		return;
 	}
 
-	if (!va_mask)
-		compute_layout();
-
 	/*
 	 * Compute HYP VA by using the same computation as kern_hyp_va()
 	 */