
[v2,08/12] arm64: mm: Logic to make offset_ttbr1 conditional

Message ID 20190528161026.13193-9-steve.capper@arm.com (mailing list archive)
State New, archived
Series 52-bit kernel + user VAs

Commit Message

Steve Capper May 28, 2019, 4:10 p.m. UTC
When running with a 52-bit userspace VA and a 48-bit kernel VA we offset
ttbr1_el1 to allow the kernel pagetables with a 52-bit PTRS_PER_PGD to
be used for both userspace and kernel.

Moving on to a 52-bit kernel VA, we no longer require this offset to
ttbr1_el1 when running on a system with HW support for 52-bit VAs.

This patch introduces alternative logic to offset_ttbr1 and expands out
the very early case in head.S. We need to use the alternative framework,
as offset_ttbr1 is used in places in the kernel where it is not possible
to safely use adrp to address kernel constants (such as the kpti paths);
thus code patching is the safer route.

Signed-off-by: Steve Capper <steve.capper@arm.com>
---
 arch/arm64/include/asm/assembler.h | 10 +++++++++-
 arch/arm64/include/asm/cpucaps.h   |  3 ++-
 arch/arm64/kernel/cpufeature.c     | 18 ++++++++++++++++++
 arch/arm64/kernel/head.S           | 14 +++++++++++++-
 arch/arm64/kernel/hibernate-asm.S  |  1 +
 5 files changed, 43 insertions(+), 3 deletions(-)

Comments

Catalin Marinas June 10, 2019, 2:18 p.m. UTC | #1
Hi Steve,

On Tue, May 28, 2019 at 05:10:22PM +0100, Steve Capper wrote:
> diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
> index 039fbd822ec6..a42c392ed1e1 100644
> --- a/arch/arm64/include/asm/assembler.h
> +++ b/arch/arm64/include/asm/assembler.h
> @@ -548,6 +548,14 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
>  	.macro	offset_ttbr1, ttbr
>  #ifdef CONFIG_ARM64_USER_VA_BITS_52
>  	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
> +#endif
> +
> +#ifdef CONFIG_ARM64_USER_KERNEL_VA_BITS_52
> +alternative_if_not ARM64_HAS_52BIT_VA
> +	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
> +alternative_else
> +	nop
> +alternative_endif
>  #endif
>  	.endm

As a nitpick, you could write alternative_else_nop_endif instead of the
last three lines.
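
With that change, the new block in offset_ttbr1 would read roughly as
follows (a sketch of the suggestion above; alternative_else_nop_endif
folds the else/nop/endif sequence into a single macro):

#ifdef CONFIG_ARM64_USER_KERNEL_VA_BITS_52
alternative_if_not ARM64_HAS_52BIT_VA
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
alternative_else_nop_endif
#endif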

Anyway, we use offset_ttbr1 in a few early cases via
idmap_cpu_replace_ttbr1 where the alternative framework hasn't got the
chance to patch the instructions. I suggest you open-code the feature
check in here; I don't think we use this on any fast path.
Steve Capper June 12, 2019, 10:58 a.m. UTC | #2
On Mon, Jun 10, 2019 at 03:18:13PM +0100, Catalin Marinas wrote:
> Hi Steve,
> 

Hi Catalin,

> On Tue, May 28, 2019 at 05:10:22PM +0100, Steve Capper wrote:
> > diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
> > index 039fbd822ec6..a42c392ed1e1 100644
> > --- a/arch/arm64/include/asm/assembler.h
> > +++ b/arch/arm64/include/asm/assembler.h
> > @@ -548,6 +548,14 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
> >  	.macro	offset_ttbr1, ttbr
> >  #ifdef CONFIG_ARM64_USER_VA_BITS_52
> >  	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
> > +#endif
> > +
> > +#ifdef CONFIG_ARM64_USER_KERNEL_VA_BITS_52
> > +alternative_if_not ARM64_HAS_52BIT_VA
> > +	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
> > +alternative_else
> > +	nop
> > +alternative_endif
> >  #endif
> >  	.endm
> 
> As a nitpick, you could write alternative_else_nop_endif instead of the
> last three lines.
> 

Thanks, yeah, that makes it easier to read.

> Anyway, we use offset_ttbr1 in a few early cases via
> idmap_cpu_replace_ttbr1 where the alternative framework hasn't got the
> chance to patch the instructions. I suggest you open-code the feature
> check in here; I don't think we use this on any fast path.
> 

Apologies for not spotting that; okay, I'll query vabits_actual
inside idmap_cpu_replace_ttbr1.

Cheers,
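
For illustration, the open-coded check described above could mirror the
__enable_mmu hunk in this patch: load vabits_actual with ldr_l, compare
against 52, and apply the orr only when the kernel is not running with
52-bit VAs. A rough sketch (register choice follows the head.S hunk and
is purely illustrative; the final form may differ):

	ldr_l	x3, vabits_actual		// runtime-determined VA size
	cmp	x3, #52				// 52-bit kernel VA in use?
	b.eq	1f				// yes: leave TTBR1 as-is
	orr	x1, x1, #TTBR1_BADDR_4852_OFFSET
1: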

Patch

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 039fbd822ec6..a42c392ed1e1 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -548,6 +548,14 @@  USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 	.macro	offset_ttbr1, ttbr
 #ifdef CONFIG_ARM64_USER_VA_BITS_52
 	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
+#endif
+
+#ifdef CONFIG_ARM64_USER_KERNEL_VA_BITS_52
+alternative_if_not ARM64_HAS_52BIT_VA
+	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
+alternative_else
+	nop
+alternative_endif
 #endif
 	.endm
 
@@ -557,7 +565,7 @@  USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
  * to be nop'ed out when dealing with 52-bit kernel VAs.
  */
 	.macro	restore_ttbr1, ttbr
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#if defined(CONFIG_ARM64_USER_VA_BITS_52) || defined(CONFIG_ARM64_KERNEL_VA_BITS_52)
 	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
 #endif
 	.endm
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index defdc67d9ab4..b317b8761744 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -62,7 +62,8 @@ 
 #define ARM64_HAS_GENERIC_AUTH_IMP_DEF		41
 #define ARM64_HAS_IRQ_PRIO_MASKING		42
 #define ARM64_HAS_DCPODP			43
+#define ARM64_HAS_52BIT_VA			44
 
-#define ARM64_NCAPS				44
+#define ARM64_NCAPS				45
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index ca27e08e3d8a..f9d8a5c8d8ce 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -957,6 +957,16 @@  has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
 	return has_cpuid_feature(entry, scope);
 }
 
+#ifdef CONFIG_ARM64_USER_KERNEL_VA_BITS_52
+extern u64 vabits_actual;
+static bool __maybe_unused
+has_52bit_kernel_va(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	return vabits_actual == 52;
+}
+
+#endif /* CONFIG_ARM64_USER_KERNEL_VA_BITS_52 */
+
 static bool __meltdown_safe = true;
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
@@ -1558,6 +1568,14 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.min_field_value = 1,
 	},
 #endif
+#ifdef CONFIG_ARM64_USER_KERNEL_VA_BITS_52
+	{
+		.desc = "52-bit kernel VA",
+		.capability = ARM64_HAS_52BIT_VA,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_52bit_kernel_va,
+	},
+#endif /* CONFIG_ARM64_USER_KERNEL_VA_BITS_52 */
 	{},
 };
 
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b3335e639b6d..8bc1b533a912 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -788,7 +788,19 @@  ENTRY(__enable_mmu)
 	phys_to_ttbr x1, x1
 	phys_to_ttbr x2, x2
 	msr	ttbr0_el1, x2			// load TTBR0
-	offset_ttbr1 x1
+
+#if defined(CONFIG_ARM64_USER_VA_BITS_52)
+	orr     x1, x1, #TTBR1_BADDR_4852_OFFSET
+#endif
+
+#if defined(CONFIG_ARM64_USER_KERNEL_VA_BITS_52)
+	ldr_l	x3, vabits_actual
+	cmp	x3, #52
+	b.eq	1f
+	orr     x1, x1, #TTBR1_BADDR_4852_OFFSET
+1:
+#endif
+
 	msr	ttbr1_el1, x1			// load TTBR1
 	isb
 	msr	sctlr_el1, x0
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index fe36d85c60bd..d32725a2b77f 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -19,6 +19,7 @@ 
 #include <linux/linkage.h>
 #include <linux/errno.h>
 
+#include <asm/alternative.h>
 #include <asm/asm-offsets.h>
 #include <asm/assembler.h>
 #include <asm/cputype.h>