[v3,07/11] kvm: arm64: Duplicate arm64_ssbd_callback_required for nVHE hyp

Message ID: 20200916173439.32265-8-dbrazdil@google.com
State: New, archived
Series: Independent per-CPU data section for nVHE

Commit Message

David Brazdil Sept. 16, 2020, 5:34 p.m. UTC
Hyp keeps track of which cores require the SSBD callback by accessing
a kernel-proper global variable. Create an nVHE symbol of the same name
and copy the value from kernel proper to nVHE at KVM init time.

Done in preparation for separating the percpu memory owned by kernel
proper from that owned by nVHE.

Signed-off-by: David Brazdil <dbrazdil@google.com>
---
 arch/arm64/include/asm/kvm_mmu.h | 10 +++++++---
 arch/arm64/kernel/image-vars.h   |  1 -
 arch/arm64/kvm/arm.c             |  2 +-
 arch/arm64/kvm/hyp/nvhe/switch.c |  3 +++
 4 files changed, 11 insertions(+), 5 deletions(-)
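
For context, the DECLARE_KVM_NVHE_PER_CPU() and per_cpu_ptr_nvhe() helpers
introduced earlier in this series resolve to a separate hyp copy of the
symbol via name mangling. A minimal sketch of the idea, assuming the
kvm_nvhe_sym() prefixing scheme the series is built on (illustrative of the
approach, not verbatim kernel code):

	/* Every nVHE symbol is the kernel name prefixed with __kvm_nvhe_. */
	#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym

	/* Declare the hyp copy of a per-CPU variable under the mangled name. */
	#define DECLARE_KVM_NVHE_PER_CPU(type, sym) \
		DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

	/* From kernel proper, resolve a given CPU's instance of the hyp copy. */
	#define per_cpu_ptr_nvhe(sym, cpu) \
		per_cpu_ptr(&kvm_nvhe_sym(sym), cpu)

With those in place, kernel proper can address the kernel copy and the hyp
copy of arm64_ssbd_callback_required side by side, which is what the value
copy in this patch relies on.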

Comments

Will Deacon Sept. 18, 2020, 11:59 a.m. UTC | #1
On Wed, Sep 16, 2020 at 06:34:35PM +0100, David Brazdil wrote:
> Hyp keeps track of which cores require the SSBD callback by accessing
> a kernel-proper global variable. Create an nVHE symbol of the same name
> and copy the value from kernel proper to nVHE at KVM init time.
> 
> Done in preparation for separating the percpu memory owned by kernel
> proper from that owned by nVHE.
> 
> Signed-off-by: David Brazdil <dbrazdil@google.com>
> ---
>  arch/arm64/include/asm/kvm_mmu.h | 10 +++++++---
>  arch/arm64/kernel/image-vars.h   |  1 -
>  arch/arm64/kvm/arm.c             |  2 +-
>  arch/arm64/kvm/hyp/nvhe/switch.c |  3 +++
>  4 files changed, 11 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index 189839c3706a..9db93da35606 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -529,23 +529,27 @@ static inline int kvm_map_vectors(void)
>  
>  #ifdef CONFIG_ARM64_SSBD
>  DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
> +DECLARE_KVM_NVHE_PER_CPU(u64, arm64_ssbd_callback_required);
>  
> -static inline int hyp_map_aux_data(void)
> +static inline int hyp_init_aux_data(void)
>  {
>  	int cpu, err;
>  
>  	for_each_possible_cpu(cpu) {
>  		u64 *ptr;
>  
> -		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
> +		ptr = per_cpu_ptr_nvhe(arm64_ssbd_callback_required, cpu);
>  		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
>  		if (err)
>  			return err;
> +
> +		/* Copy value from kernel to hyp. */
> +		*ptr = per_cpu(arm64_ssbd_callback_required, cpu);

Hmm. Is this correct for late-arriving CPUs, where we don't know whether
a callback is required at the point we do the copy?

That sounds fiddly to resolve, but this _might_ all be moot because I'm
about to post a series that allows us to remove the hyp mapping of this
variable entirely. So leave this for now, but maybe stick a comment in
noting that it doesn't work for late CPUs.

Will
David Brazdil Sept. 22, 2020, 6:07 p.m. UTC | #2
> >  		u64 *ptr;
> >  
> > -		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
> > +		ptr = per_cpu_ptr_nvhe(arm64_ssbd_callback_required, cpu);
> >  		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
> >  		if (err)
> >  			return err;
> > +
> > +		/* Copy value from kernel to hyp. */
> > +		*ptr = per_cpu(arm64_ssbd_callback_required, cpu);
> 
> Hmm. Is this correct for late-arriving CPUs, where we don't know whether
> a callback is required at the point we do the copy?
> 
> That sounds fiddly to resolve, but this _might_ all be moot because I'm
> about to post a series that allows us to remove the hyp mapping of this
> variable entirely. So leave this for now, but maybe stick a comment in
> noting that it doesn't work for late CPUs.

Ah, good point. I'll move the value copy to the end of cpu_init_hyp_mode();
the value must be known by that point. And if your series gets rid of this
completely, even better.
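
For illustration, the follow-up David describes could look roughly like the
below, assuming a this_cpu_ptr_nvhe() accessor analogous to the
per_cpu_ptr_nvhe() used in the patch (a hypothetical sketch of the proposed
change, not code from the posted series):

	static void cpu_init_hyp_mode(void)
	{
		/* ... existing per-CPU hyp setup ... */

	#ifdef CONFIG_ARM64_SSBD
		/*
		 * Copy the kernel's view of whether this CPU needs the SSBD
		 * callback into the hyp copy. Doing this per CPU at init time
		 * means a late-arriving CPU picks up the value determined
		 * during its own feature detection rather than a stale
		 * boot-time snapshot.
		 */
		*this_cpu_ptr_nvhe(arm64_ssbd_callback_required) =
			__this_cpu_read(arm64_ssbd_callback_required);
	#endif
	}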

Patch

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 189839c3706a..9db93da35606 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -529,23 +529,27 @@ static inline int kvm_map_vectors(void)
 
 #ifdef CONFIG_ARM64_SSBD
 DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+DECLARE_KVM_NVHE_PER_CPU(u64, arm64_ssbd_callback_required);
 
-static inline int hyp_map_aux_data(void)
+static inline int hyp_init_aux_data(void)
 {
 	int cpu, err;
 
 	for_each_possible_cpu(cpu) {
 		u64 *ptr;
 
-		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
+		ptr = per_cpu_ptr_nvhe(arm64_ssbd_callback_required, cpu);
 		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
 		if (err)
 			return err;
+
+		/* Copy value from kernel to hyp. */
+		*ptr = per_cpu(arm64_ssbd_callback_required, cpu);
 	}
 	return 0;
 }
 #else
-static inline int hyp_map_aux_data(void)
+static inline int hyp_init_aux_data(void)
 {
 	return 0;
 }
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 76da2ad1010c..59d12a0b4622 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -67,7 +67,6 @@ KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
 
 /* Global kernel state accessed by nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
 KVM_NVHE_ALIAS(kvm_host_data);
 KVM_NVHE_ALIAS(kvm_vgic_global_state);
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index b588c3b5c2f0..3bdc2661d276 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1552,7 +1552,7 @@ static int init_hyp_mode(void)
 		}
 	}
 
-	err = hyp_map_aux_data();
+	err = hyp_init_aux_data();
 	if (err)
 		kvm_err("Cannot map host auxiliary data: %d\n", err);
 
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index cc4f8e790fb3..4662df6330d7 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -27,6 +27,9 @@
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 
+/* Non-VHE copy of the kernel symbol. */
+DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
 static void __activate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 val;
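
To round off the picture: once the hyp copy is defined and populated, nVHE
code can read its own per-CPU instance directly, with no alias into kernel
proper required. An illustrative reader is sketched below; the real
consumers live in the hyp entry and vCPU switch paths.

	/* Illustrative only: from hyp, this resolves to the hyp-owned copy. */
	static bool hyp_ssbd_callback_required(void)
	{
		return __this_cpu_read(arm64_ssbd_callback_required) != 0;
	}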