[v1,06/24] kvm: arm64: Support per_cpu_ptr in nVHE hyp code

Message ID: 20201109113233.9012-7-dbrazdil@google.com
State: New, archived
Series: Opt-in always-on nVHE hypervisor

Commit Message

David Brazdil Nov. 9, 2020, 11:32 a.m. UTC
When compiling with __KVM_NVHE_HYPERVISOR__, redefine per_cpu_offset() to
__hyp_per_cpu_offset(), which looks up the base of the nVHE per-CPU
region of the given CPU and computes its offset from the
.hyp.data..percpu section.

This enables use of per_cpu_ptr() helpers in nVHE hyp code. Until now,
only this_cpu_ptr() was supported, by setting TPIDR_EL2.
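
For illustration (not part of the patch), a minimal hyp-side use could look
like the sketch below; some_hyp_counter and bump_counter_on() are invented
for the example. Note that the empty '#define __per_cpu_offset' in the hunk
below suppresses the fallback per_cpu_offset() in asm-generic/percpu.h,
which is guarded by '#ifndef __per_cpu_offset'.

/* Hypothetical example, not from the patch. */
DEFINE_PER_CPU(unsigned long, some_hyp_counter);

void bump_counter_on(unsigned int cpu)
{
	/*
	 * per_cpu_ptr() expands via per_cpu_offset(cpu), i.e.
	 * __hyp_per_cpu_offset(cpu), and so yields a pointer into
	 * the per-CPU region of the requested CPU rather than only
	 * the local one.
	 */
	unsigned long *cnt = per_cpu_ptr(&some_hyp_counter, cpu);

	(*cnt)++;
}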

Signed-off-by: David Brazdil <dbrazdil@google.com>
---
 arch/arm64/include/asm/percpu.h  |  6 ++++++
 arch/arm64/kernel/image-vars.h   |  3 +++
 arch/arm64/kvm/hyp/nvhe/Makefile |  3 ++-
 arch/arm64/kvm/hyp/nvhe/percpu.c | 22 ++++++++++++++++++++++
 4 files changed, 33 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/kvm/hyp/nvhe/percpu.c

Comments

Marc Zyngier Nov. 10, 2020, 3:08 p.m. UTC | #1
On 2020-11-09 11:32, David Brazdil wrote:
> When compiling with __KVM_NVHE_HYPERVISOR__, redefine per_cpu_offset() to
> __hyp_per_cpu_offset(), which looks up the base of the nVHE per-CPU
> region of the given CPU and computes its offset from the
> .hyp.data..percpu section.
> 
> This enables use of per_cpu_ptr() helpers in nVHE hyp code. Until now,
> only this_cpu_ptr() was supported, by setting TPIDR_EL2.
> 
> Signed-off-by: David Brazdil <dbrazdil@google.com>
> ---
>  arch/arm64/include/asm/percpu.h  |  6 ++++++
>  arch/arm64/kernel/image-vars.h   |  3 +++
>  arch/arm64/kvm/hyp/nvhe/Makefile |  3 ++-
>  arch/arm64/kvm/hyp/nvhe/percpu.c | 22 ++++++++++++++++++++++
>  4 files changed, 33 insertions(+), 1 deletion(-)
>  create mode 100644 arch/arm64/kvm/hyp/nvhe/percpu.c
> 
> diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
> index 1599e17379d8..8f1661603b78 100644
> --- a/arch/arm64/include/asm/percpu.h
> +++ b/arch/arm64/include/asm/percpu.h
> @@ -239,6 +239,12 @@ PERCPU_RET_OP(add, add, ldadd)
>  #define this_cpu_cmpxchg_8(pcp, o, n)	\
>  	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
> 
> +#ifdef __KVM_NVHE_HYPERVISOR__
> +extern unsigned long __hyp_per_cpu_offset(unsigned int cpu);
> +#define __per_cpu_offset
> +#define per_cpu_offset(cpu)	__hyp_per_cpu_offset((cpu))
> +#endif
> +
>  #include <asm-generic/percpu.h>
> 
>  /* Redefine macros for nVHE hyp under DEBUG_PREEMPT to avoid its dependencies. */
> diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
> index c615b285ff5b..78a42a7cdb72 100644
> --- a/arch/arm64/kernel/image-vars.h
> +++ b/arch/arm64/kernel/image-vars.h
> @@ -103,6 +103,9 @@ KVM_NVHE_ALIAS(gic_nonsecure_priorities);
>  KVM_NVHE_ALIAS(__start___kvm_ex_table);
>  KVM_NVHE_ALIAS(__stop___kvm_ex_table);
> 
> +/* Array containing bases of nVHE per-CPU memory regions. */
> +KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);
> +
>  #endif /* CONFIG_KVM */
> 
>  #endif /* __ARM64_KERNEL_IMAGE_VARS_H */
> diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
> index ddde15fe85f2..c45f440cce51 100644
> --- a/arch/arm64/kvm/hyp/nvhe/Makefile
> +++ b/arch/arm64/kvm/hyp/nvhe/Makefile
> @@ -6,7 +6,8 @@
>  asflags-y := -D__KVM_NVHE_HYPERVISOR__
>  ccflags-y := -D__KVM_NVHE_HYPERVISOR__
> 
> -obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o hyp-main.o
> +obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
> +	 hyp-main.o percpu.o
>  obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
>  	 ../fpsimd.o ../hyp-entry.o
> 
> diff --git a/arch/arm64/kvm/hyp/nvhe/percpu.c b/arch/arm64/kvm/hyp/nvhe/percpu.c
> new file mode 100644
> index 000000000000..5fd0c5696907
> --- /dev/null
> +++ b/arch/arm64/kvm/hyp/nvhe/percpu.c
> @@ -0,0 +1,22 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Copyright (C) 2020 - Google LLC
> + * Author: David Brazdil <dbrazdil@google.com>
> + */
> +
> +#include <asm/kvm_asm.h>
> +#include <asm/kvm_hyp.h>
> +#include <asm/kvm_mmu.h>
> +
> +unsigned long __hyp_per_cpu_offset(unsigned int cpu)
> +{
> +	unsigned long *cpu_base_array;
> +	unsigned long this_cpu_base;
> +
> +	if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base))
> +		hyp_panic();
> +
> +	cpu_base_array = kern_hyp_va(&kvm_arm_hyp_percpu_base[0]);

There is no guarantee that this will not generate PC-relative
addressing, resulting in kern_hyp_va() being applied twice.

Consider using hyp_symbol_addr() instead, which always does the right
thing by forcing PC-relative addressing and not subsequently mangling
the address.

> +	this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
> +	return this_cpu_base - (unsigned long)&__per_cpu_start;

And this is the opposite case: if the compiler generates an absolute
address, you're toast. Yes, this is just as unlikely, but hey...
Same remedy should apply.
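
Something like this, perhaps (untested, just to illustrate the shape of
the fix; it relies on the existing hyp_symbol_addr() helper):

unsigned long __hyp_per_cpu_offset(unsigned int cpu)
{
	unsigned long *cpu_base_array;
	unsigned long this_cpu_base;
	unsigned long elf_base;

	if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base))
		hyp_panic();

	/* Forced PC-relative: already a hyp VA, so no kern_hyp_va(). */
	cpu_base_array = (unsigned long *)hyp_symbol_addr(kvm_arm_hyp_percpu_base);
	/* The array holds kernel VAs, so the loaded value still needs it. */
	this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
	/* Forced PC-relative again, never an absolute kernel address. */
	elf_base = (unsigned long)hyp_symbol_addr(__per_cpu_start);
	return this_cpu_base - elf_base;
}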

Thanks,

         M.
David Brazdil Nov. 11, 2020, 12:32 p.m. UTC | #2
> > +
> > +	cpu_base_array = kern_hyp_va(&kvm_arm_hyp_percpu_base[0]);
> 
> There is no guarantee that this will not generate PC-relative
> addressing, resulting in kern_hyp_va() being applied twice.
> 
> Consider using hyp_symbol_addr() instead, which always does the right
> thing by forcing PC-relative addressing and not subsequently mangling
> the address.
> 
> > +	this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
> > +	return this_cpu_base - (unsigned long)&__per_cpu_start;
> 
> And this is the opposite case: if the compiler generates an absolute
> address, you're toast. Yes, this is just as unlikely, but hey...
> Same remedy should apply.

Good point, and I'll probably keep forgetting about this in the future. Now
that all .hyp.text is only executed under hyp page tables, should we start
thinking about fixing up the relocations?
Marc Zyngier Nov. 11, 2020, 12:46 p.m. UTC | #3
On 2020-11-11 12:32, David Brazdil wrote:
>> > +
>> > +	cpu_base_array = kern_hyp_va(&kvm_arm_hyp_percpu_base[0]);
>> 
>> There is no guarantee that this will not generate PC-relative
>> addressing, resulting in kern_hyp_va() being applied twice.
>> 
>> Consider using hyp_symbol_addr() instead, which always does the right
>> thing by forcing PC-relative addressing and not subsequently mangling
>> the address.
>> 
>> > +	this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
>> > +	return this_cpu_base - (unsigned long)&__per_cpu_start;
>> 
>> And this is the opposite case: if the compiler generates an absolute
>> address, you're toast. Yes, this is just as unlikely, but hey...
>> Same remedy should apply.
> 
> Good point, and I'll probably keep forgetting about this in the future.
> Now that all .hyp.text is only executed under hyp page tables, should we
> start thinking about fixing up the relocations?

Why not, if you can deal with the hypervisor text being mapped at a random
location, and make sure that the kernel doesn't process the relocations
for you. This would certainly save us a lot of runtime offsetting (which
I'm adding to in a separate series).
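
For the record, such a fixup would boil down to walking the hyp image's
R_AARCH64_RELATIVE entries and writing each target's runtime address.
A completely untested sketch, with rela_start, rela_end and hyp_base as
placeholder inputs rather than existing symbols:

#include <linux/elf.h>

#ifndef R_AARCH64_RELATIVE
#define R_AARCH64_RELATIVE	1027
#endif

/*
 * Apply R_AARCH64_RELATIVE fixups to an image linked at 0 and mapped
 * at hyp_base: store the runtime address at each relocated location.
 */
static void hyp_apply_relocations(Elf64_Rela *rela_start,
				  Elf64_Rela *rela_end,
				  unsigned long hyp_base)
{
	Elf64_Rela *rel;

	for (rel = rela_start; rel < rela_end; rel++) {
		if (ELF64_R_TYPE(rel->r_info) != R_AARCH64_RELATIVE)
			continue;
		*(unsigned long *)(hyp_base + rel->r_offset) =
			hyp_base + rel->r_addend;
	}
}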

         M.

Patch

diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 1599e17379d8..8f1661603b78 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -239,6 +239,12 @@  PERCPU_RET_OP(add, add, ldadd)
 #define this_cpu_cmpxchg_8(pcp, o, n)	\
 	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
 
+#ifdef __KVM_NVHE_HYPERVISOR__
+extern unsigned long __hyp_per_cpu_offset(unsigned int cpu);
+#define __per_cpu_offset
+#define per_cpu_offset(cpu)	__hyp_per_cpu_offset((cpu))
+#endif
+
 #include <asm-generic/percpu.h>
 
 /* Redefine macros for nVHE hyp under DEBUG_PREEMPT to avoid its dependencies. */
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index c615b285ff5b..78a42a7cdb72 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -103,6 +103,9 @@  KVM_NVHE_ALIAS(gic_nonsecure_priorities);
 KVM_NVHE_ALIAS(__start___kvm_ex_table);
 KVM_NVHE_ALIAS(__stop___kvm_ex_table);
 
+/* Array containing bases of nVHE per-CPU memory regions. */
+KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);
+
 #endif /* CONFIG_KVM */
 
 #endif /* __ARM64_KERNEL_IMAGE_VARS_H */
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index ddde15fe85f2..c45f440cce51 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -6,7 +6,8 @@ 
 asflags-y := -D__KVM_NVHE_HYPERVISOR__
 ccflags-y := -D__KVM_NVHE_HYPERVISOR__
 
-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o hyp-main.o
+obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
+	 hyp-main.o percpu.o
 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
 	 ../fpsimd.o ../hyp-entry.o
 
diff --git a/arch/arm64/kvm/hyp/nvhe/percpu.c b/arch/arm64/kvm/hyp/nvhe/percpu.c
new file mode 100644
index 000000000000..5fd0c5696907
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/percpu.c
@@ -0,0 +1,22 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 - Google LLC
+ * Author: David Brazdil <dbrazdil@google.com>
+ */
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+
+unsigned long __hyp_per_cpu_offset(unsigned int cpu)
+{
+	unsigned long *cpu_base_array;
+	unsigned long this_cpu_base;
+
+	if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base))
+		hyp_panic();
+
+	cpu_base_array = kern_hyp_va(&kvm_arm_hyp_percpu_base[0]);
+	this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
+	return this_cpu_base - (unsigned long)&__per_cpu_start;
+}