
[V5,11/12] arm64: mm: Remove vabits_user

Message ID 20190807155524.5112-12-steve.capper@arm.com (mailing list archive)
State New, archived
Series 52-bit kernel + user VAs

Commit Message

Steve Capper Aug. 7, 2019, 3:55 p.m. UTC
Previous patches have enabled 52-bit kernel + user VAs, and there is no
longer any scenario where the user VA size differs from the kernel VA
size.

This patch removes the now-redundant vabits_user variable and replaces
its usage with vabits_actual where appropriate.

Signed-off-by: Steve Capper <steve.capper@arm.com>

---

New in V5
---
 arch/arm64/include/asm/memory.h       | 3 ---
 arch/arm64/include/asm/pointer_auth.h | 2 +-
 arch/arm64/include/asm/processor.h    | 2 +-
 arch/arm64/kernel/head.S              | 7 +------
 arch/arm64/mm/fault.c                 | 3 +--
 arch/arm64/mm/mmu.c                   | 2 --
 arch/arm64/mm/proc.S                  | 2 +-
 7 files changed, 5 insertions(+), 16 deletions(-)
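
For illustration, a minimal, self-contained C sketch (not kernel code;
GENMASK64() is a userspace stand-in for the kernel's GENMASK()) of the
definitions this patch unifies: both TASK_SIZE_64 and
ptrauth_user_pac_mask() now derive from the single vabits_actual value,
mirroring the hunks below. The 48/52 values correspond to the arm64
CONFIG_ARM64_VA_BITS options involved.

#include <stdint.h>
#include <stdio.h>

/* Userspace model of the kernel's GENMASK(): bits [h:l] set. */
#define GENMASK64(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* On arm64, vabits_actual is 48 or 52; user and kernel
	 * address spaces now share this single value. */
	for (uint64_t va_bits = 48; va_bits <= 52; va_bits += 4) {
		uint64_t task_size_64 = 1ULL << va_bits;	/* TASK_SIZE_64 */
		uint64_t pac_mask = GENMASK64(54, va_bits);	/* ptrauth_user_pac_mask() */

		printf("vabits_actual=%llu TASK_SIZE_64=%#llx pac_mask=%#llx\n",
		       (unsigned long long)va_bits,
		       (unsigned long long)task_size_64,
		       (unsigned long long)pac_mask);
	}
	return 0;
}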

Comments

Catalin Marinas Aug. 7, 2019, 4:17 p.m. UTC | #1
On Wed, Aug 07, 2019 at 04:55:23PM +0100, Steve Capper wrote:
> Previous patches have enabled 52-bit kernel + user VAs, and there is no
> longer any scenario where the user VA size differs from the kernel VA
> size.
> 
> This patch removes the now-redundant vabits_user variable and replaces
> its usage with vabits_actual where appropriate.
> 
> Signed-off-by: Steve Capper <steve.capper@arm.com>

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>

Patch

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 3b5d1327035e..56e79da139c2 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -194,9 +194,6 @@ static inline unsigned long kaslr_offset(void)
 	return kimage_vaddr - KIMAGE_VADDR;
 }
 
-/* the actual size of a user virtual address */
-extern u64			vabits_user;
-
 /*
  * Allow all memory at the discovery stage. We will clip it later.
  */
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index d328540cb85e..7a24bad1a58b 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -69,7 +69,7 @@ extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
  * The EL0 pointer bits used by a pointer authentication code.
  * This is dependent on TBI0 being enabled, or bits 63:56 would also apply.
  */
-#define ptrauth_user_pac_mask()	GENMASK(54, vabits_user)
+#define ptrauth_user_pac_mask()	GENMASK(54, vabits_actual)
 
 /* Only valid for EL0 TTBR0 instruction pointers */
 static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 0e1f2770192a..e4c93945e477 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -43,7 +43,7 @@
  */
 
 #define DEFAULT_MAP_WINDOW_64	(UL(1) << VA_BITS_MIN)
-#define TASK_SIZE_64		(UL(1) << vabits_user)
+#define TASK_SIZE_64		(UL(1) << vabits_actual)
 
 #ifdef CONFIG_COMPAT
 #if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c8446f8c81f5..949b001a73bb 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -316,11 +316,6 @@ __create_page_tables:
 #endif
 	mov	x5, #VA_BITS_MIN
 1:
-	adr_l	x6, vabits_user
-	str	x5, [x6]
-	dmb	sy
-	dc	ivac, x6		// Invalidate potentially stale cache line
-
 	adr_l	x6, vabits_actual
 	str	x5, [x6]
 	dmb	sy
@@ -795,7 +790,7 @@ ENDPROC(__enable_mmu)
 
 ENTRY(__cpu_secondary_check52bitva)
 #ifdef CONFIG_ARM64_VA_BITS_52
-	ldr_l	x0, vabits_user
+	ldr_l	x0, vabits_actual
 	cmp	x0, #52
 	b.ne	2f
 
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 86fc1aff3462..3ef0a9f64240 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -140,8 +140,7 @@ static void show_pte(unsigned long addr)
 
 	pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
 		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
-		 mm == &init_mm ? vabits_actual : (int)vabits_user,
-		 (unsigned long)virt_to_phys(mm->pgd));
+		 vabits_actual, (unsigned long)virt_to_phys(mm->pgd));
 	pgdp = pgd_offset(mm, addr);
 	pgd = READ_ONCE(*pgdp);
 	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 07b30e6d17f8..0c8f7e55f859 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -40,8 +40,6 @@
 
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
-u64 vabits_user __ro_after_init;
-EXPORT_SYMBOL(vabits_user);
 
 u64 __section(".mmuoff.data.write") vabits_actual;
 EXPORT_SYMBOL(vabits_actual);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 8b021c5c0884..391f9cabfe60 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -439,7 +439,7 @@ ENTRY(__cpu_setup)
 	tcr_clear_errata_bits x10, x9, x5
 
 #ifdef CONFIG_ARM64_VA_BITS_52
-	ldr_l		x9, vabits_user
+	ldr_l		x9, vabits_actual
 	sub		x9, xzr, x9
 	add		x9, x9, #64
 	tcr_set_t1sz	x10, x9
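
For reference, the __cpu_setup hunk above computes TCR_EL1.T1SZ as 64
minus the VA size: the "sub x9, xzr, x9; add x9, x9, #64" pair evaluates
64 - vabits_actual. A minimal C sketch of the same arithmetic follows;
the t1sz_for() helper is hypothetical, for illustration only, not a
kernel function.

#include <assert.h>

/* TCR_EL1 TxSZ fields encode an address-space size of (64 - VA bits);
 * this mirrors "sub x9, xzr, x9; add x9, x9, #64" in __cpu_setup.
 * t1sz_for() is a hypothetical helper, not a kernel function. */
static unsigned int t1sz_for(unsigned int vabits_actual)
{
	return 64 - vabits_actual;
}

int main(void)
{
	assert(t1sz_for(52) == 12);	/* 52-bit kernel VAs */
	assert(t1sz_for(48) == 16);	/* 48-bit kernel VAs */
	return 0;
}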