
[v4,3/4] arm64: compat: Refactor aarch32_alloc_vdso_pages()

Message ID 20190415094937.13518-4-vincenzo.frascino@arm.com (mailing list archive)
State New, archived
Series arm64: compat: Add kuser helpers config option

Commit Message

Vincenzo Frascino April 15, 2019, 9:49 a.m. UTC
aarch32_alloc_vdso_pages() needs to be refactored to make it
easier to disable kuser helpers.

Split the function into aarch32_alloc_kuser_vdso_page() and
aarch32_alloc_sigreturn_vdso_page().
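
For context, the split lets each page be allocated (or skipped)
independently. A minimal sketch, assuming the CONFIG_KUSER_HELPERS
option that the rest of this series introduces (the stub below is
illustrative, not part of this patch), of how the kuser page can
then be compiled out:

#ifdef CONFIG_KUSER_HELPERS
static int aarch32_alloc_kuser_vdso_page(void);	/* as implemented below */
#else
static int aarch32_alloc_kuser_vdso_page(void)
{
	/* Kuser helpers disabled: no vectors page to set up. */
	return 0;
}
#endif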

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/kernel/vdso.c | 73 ++++++++++++++++++++++++++--------------
 1 file changed, 48 insertions(+), 25 deletions(-)

Patch

diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 79fd7a65ae55..22e8b039cfe6 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -77,46 +77,69 @@  static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
 	},
 };
 
-static int __init aarch32_alloc_vdso_pages(void)
+static int aarch32_alloc_kuser_vdso_page(void)
 {
 	extern char __kuser_helper_start[], __kuser_helper_end[];
-	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
-
 	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
-	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
-	unsigned long vdso_pages[C_PAGES];
+	unsigned long vdso_page;
 
-	vdso_pages[C_VECTORS] = get_zeroed_page(GFP_ATOMIC);
-	if (!vdso_pages[C_VECTORS])
+	vdso_page = get_zeroed_page(GFP_ATOMIC);
+	if (!vdso_page)
 		return -ENOMEM;
 
-	vdso_pages[C_SIGPAGE] = get_zeroed_page(GFP_ATOMIC);
-	if (!vdso_pages[C_SIGPAGE]) {
-		/*
-		 * free_page() it is required to avoid to leak the vectors page
-		 * if the allocation of sigpage fails.
-		 */
-		free_page(vdso_pages[C_VECTORS]);
-		return -ENOMEM;
-	}
-
 	/* kuser helpers */
-	memcpy((void *)(vdso_pages[C_VECTORS] + 0x1000 - kuser_sz),
+	memcpy((void *)(vdso_page + 0x1000 - kuser_sz),
 	       __kuser_helper_start,
 	       kuser_sz);
 
+	aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
+
+	flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
+
+	return 0;
+}
+
+static int aarch32_alloc_sigreturn_vdso_page(void)
+{
+	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
+	unsigned long vdso_page;
+
+	vdso_page = get_zeroed_page(GFP_ATOMIC);
+	if (!vdso_page)
+		return -ENOMEM;
+
 	/* sigreturn code */
-	memcpy((void *)vdso_pages[C_SIGPAGE],
+	memcpy((void *)vdso_page,
 	       __aarch32_sigret_code_start,
 	       sigret_sz);
 
-	flush_icache_range(vdso_pages[C_VECTORS],
-			   vdso_pages[C_VECTORS] + PAGE_SIZE);
-	flush_icache_range(vdso_pages[C_SIGPAGE],
-			   vdso_pages[C_SIGPAGE] + PAGE_SIZE);
+	aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(vdso_page);
+
+	flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);
+
+	return 0;
+}
+
+static int __init aarch32_alloc_vdso_pages(void)
+{
+	int ret;
+
+	ret = aarch32_alloc_kuser_vdso_page();
+	if (ret)
+		return ret;
 
-	aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_pages[C_VECTORS]);
-	aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(vdso_pages[C_SIGPAGE]);
+	ret = aarch32_alloc_sigreturn_vdso_page();
+	if (ret) {
+		unsigned long vectors_addr = (unsigned long)page_to_virt(
+						aarch32_vdso_pages[C_VECTORS]);
+		/*
+		 * free_page() is required to avoid leaking the vectors page
+		 * if the allocation of the sigpage fails.
+		 */
+		free_page(vectors_addr);
+		return ret;
+	}
 
 	return 0;
 }
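
For reference, the refactored top-level function keeps the original
error handling: if the sigreturn page allocation fails, the
already-allocated vectors page is released before returning. The
allocation is still driven by the early initcall elsewhere in
vdso.c (not visible in this hunk; quoted here as I believe it
appears in mainline, unchanged by this patch):

arch_initcall(aarch32_alloc_vdso_pages);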