diff mbox series

[v3,3/4] arm64: compat: Refactor aarch32_alloc_vdso_pages()

Message ID 20190402162757.13491-4-vincenzo.frascino@arm.com (mailing list archive)
State New, archived
Headers show
Series arm64: compat: Add kuser helpers config option | expand

Commit Message

Vincenzo Frascino April 2, 2019, 4:27 p.m. UTC
aarch32_alloc_vdso_pages() needs to be refactored to make it
easier to disable kuser helpers.

Divide the function into aarch32_alloc_kuser_vdso_page() and
aarch32_alloc_sigreturn_vdso_page().

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
---
 arch/arm64/kernel/vdso.c | 59 +++++++++++++++++++++++++++-------------
 1 file changed, 40 insertions(+), 19 deletions(-)

Comments

Catalin Marinas April 2, 2019, 4:36 p.m. UTC | #1
On Tue, Apr 02, 2019 at 05:27:56PM +0100, Vincenzo Frascino wrote:
> +static int __init aarch32_alloc_vdso_pages(void)
> +{
> +	int kuser_err, sigreturn_err;
> +
> +	kuser_err = aarch32_alloc_kuser_vdso_page();
> +	if (kuser_err)
> +		return kuser_err;
>  
> -	aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_pages[C_VECTORS]);
> -	aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(vdso_pages[C_SIGPAGE]);
> +	sigreturn_err = aarch32_alloc_sigreturn_vdso_page();
> +	if (sigreturn_err)
> +		return sigreturn_err;
>  
>  	return 0;

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>

(nitpick: could have used a single "err" variable; no need to re-spin)
diff mbox series

Patch

diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 16f8fce5c501..7ee676c345ed 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -77,40 +77,61 @@  static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
 	},
 };
 
-static int __init aarch32_alloc_vdso_pages(void)
+static int aarch32_alloc_kuser_vdso_page(void)
 {
 	extern char __kuser_helper_start[], __kuser_helper_end[];
-	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
-
 	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
-	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
-	unsigned long vdso_pages[C_PAGES];
+	unsigned long vdso_page;
 
-	vdso_pages[C_VECTORS] = get_zeroed_page(GFP_ATOMIC);
-	if (!vdso_pages[C_VECTORS])
-		return -ENOMEM;
-
-	vdso_pages[C_SIGPAGE] = get_zeroed_page(GFP_ATOMIC);
-	if (!vdso_pages[C_SIGPAGE])
+	vdso_page = get_zeroed_page(GFP_ATOMIC);
+	if (!vdso_page)
 		return -ENOMEM;
 
 	/* kuser helpers */
-	memcpy((void *)(vdso_pages[C_VECTORS] + 0x1000 - kuser_sz),
+	memcpy((void *)(vdso_page + 0x1000 - kuser_sz),
 	       __kuser_helper_start,
 	       kuser_sz);
 
+	aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
+
+	flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
+
+	return 0;
+}
+
+static int aarch32_alloc_sigreturn_vdso_page(void)
+{
+	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
+	unsigned long vdso_page;
+
+	vdso_page = get_zeroed_page(GFP_ATOMIC);
+	if (!vdso_page)
+		return -ENOMEM;
+
 	/* sigreturn code */
-	memcpy((void *)vdso_pages[C_SIGPAGE],
+	memcpy((void *)vdso_page,
 	       __aarch32_sigret_code_start,
 	       sigret_sz);
 
-	flush_icache_range(vdso_pages[C_VECTORS],
-			   vdso_pages[C_VECTORS] + PAGE_SIZE);
-	flush_icache_range(vdso_pages[C_SIGPAGE],
-			   vdso_pages[C_SIGPAGE] + PAGE_SIZE);
+	aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(vdso_page);
+
+	flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);
+
+	return 0;
+}
+
+static int __init aarch32_alloc_vdso_pages(void)
+{
+	int kuser_err, sigreturn_err;
+
+	kuser_err = aarch32_alloc_kuser_vdso_page();
+	if (kuser_err)
+		return kuser_err;
 
-	aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_pages[C_VECTORS]);
-	aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(vdso_pages[C_SIGPAGE]);
+	sigreturn_err = aarch32_alloc_sigreturn_vdso_page();
+	if (sigreturn_err)
+		return sigreturn_err;
 
 	return 0;
 }