
[v5,14/23] arm64: Refactor vDSO code

Message ID 20190222122430.21180-15-vincenzo.frascino@arm.com (mailing list archive)
State New, archived
Series Unify vDSOs across more architectures

Commit Message

Vincenzo Frascino Feb. 22, 2019, 12:24 p.m. UTC
Most of the code that initializes the arm64 and compat vDSOs will be
shared, so refactor the current implementation to avoid duplication and
make it easier to maintain.

Refactor vdso.c to simplify the implementation of the arm64 compat vDSO
(which will be introduced in a future patch).

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
---
 arch/arm64/kernel/vdso.c | 208 +++++++++++++++++++++++++--------------
 1 file changed, 135 insertions(+), 73 deletions(-)
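
The refactor turns the single set of vdso_* globals into a small lookup
table indexed by ABI, so the compat vDSO added later in the series only
needs a second table slot plus its own vm_special_mapping pair. A
minimal sketch of what that second slot could look like (hypothetical:
the ARM64_VDSO32 value, the "vdso32" name and the vdso32_start/vdso32_end
symbols are illustrative assumptions, not taken from the follow-up patch):

	/* assumed linker symbols delimiting a compat vDSO image */
	extern char vdso32_start[], vdso32_end[];

	enum arch_vdso_type {
		ARM64_VDSO = 0,
		ARM64_VDSO32 = 1,	/* assumed compat entry */
	};

	static struct __vdso_lookup_t vdso_lookup[2] __ro_after_init = {
		{
			.name = "vdso",
			.vdso_code_start = vdso_start,
			.vdso_code_end = vdso_end,
		},
		{	/* hypothetical compat slot */
			.name = "vdso32",
			.vdso_code_start = vdso32_start,
			.vdso_code_end = vdso32_end,
		},
	};

The compat setup path could then call __vdso_init(ARM64_VDSO32) and
__setup_additional_pages(ARM64_VDSO32, ...) exactly as the native path
below does with ARM64_VDSO.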

Comments

Mark Rutland Feb. 22, 2019, 2:37 p.m. UTC | #1
On Fri, Feb 22, 2019 at 12:24:21PM +0000, Vincenzo Frascino wrote:
> Most of the code that initializes the arm64 and compat vDSOs will be
> shared, so refactor the current implementation to avoid duplication and
> make it easier to maintain.
> 
> Refactor vdso.c to simplify the implementation of the arm64 compat vDSO
> (which will be introduced in a future patch).
> 
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Will Deacon <will.deacon@arm.com>
> Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
> ---
>  arch/arm64/kernel/vdso.c | 208 +++++++++++++++++++++++++--------------
>  1 file changed, 135 insertions(+), 73 deletions(-)
> 
> diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
> index 523e56658b84..c217245768ea 100644
> --- a/arch/arm64/kernel/vdso.c
> +++ b/arch/arm64/kernel/vdso.c
> @@ -41,7 +41,30 @@
>  #include <asm/vdso.h>
>  
>  extern char vdso_start[], vdso_end[];
> -static unsigned long vdso_pages __ro_after_init;
> +
> +/* vdso_lookup arch_index */
> +enum arch_vdso_type {
> +	ARM64_VDSO = 0,
> +};
> +
> +struct __vdso_lookup_t {

If you want to give this a _t suffix, please use a typedef so that you
don't need to also say 'struct' to use it.

I think this would be better named struct vdso_instance or struct
vdso_abi.
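
Something like this sketch (illustrative only, not code from the
series) shows the two options:

	/* typedef variant: keeps the _t suffix without 'struct' at use sites */
	typedef struct {
		const char *name;
		/* ... */
	} vdso_lookup_t;

	/* or keep the struct keyword with a clearer tag */
	struct vdso_abi {
		const char *name;
		/* ... */
	};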

Thanks,
Mark.

> [...]

Patch

diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 523e56658b84..c217245768ea 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -41,7 +41,30 @@ 
 #include <asm/vdso.h>
 
 extern char vdso_start[], vdso_end[];
-static unsigned long vdso_pages __ro_after_init;
+
+/* vdso_lookup arch_index */
+enum arch_vdso_type {
+	ARM64_VDSO = 0,
+};
+
+struct __vdso_lookup_t {
+	const char *name;
+	const char *vdso_code_start;
+	const char *vdso_code_end;
+	unsigned long vdso_pages;
+	/* Data Mapping */
+	struct vm_special_mapping *dm;
+	/* Code Mapping */
+	struct vm_special_mapping *cm;
+};
+
+static struct __vdso_lookup_t vdso_lookup[2] __ro_after_init = {
+	{
+		.name = "vdso",
+		.vdso_code_start = vdso_start,
+		.vdso_code_end = vdso_end,
+	},
+};
 
 /*
  * The vDSO data page.
@@ -52,6 +75,106 @@ static union {
 } vdso_data_store __page_aligned_data;
 struct vdso_data *vdso_data = &vdso_data_store.data;
 
+static int __vdso_remap(enum arch_vdso_type arch_index,
+			const struct vm_special_mapping *sm,
+			struct vm_area_struct *new_vma)
+{
+	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+	unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end -
+				  vdso_lookup[arch_index].vdso_code_start;
+
+	if (vdso_size != new_size)
+		return -EINVAL;
+
+	current->mm->context.vdso = (void *)new_vma->vm_start;
+
+	return 0;
+}
+
+static int __vdso_init(enum arch_vdso_type arch_index)
+{
+	int i;
+	struct page **vdso_pagelist;
+	unsigned long pfn;
+
+	if (memcmp(vdso_lookup[arch_index].vdso_code_start, "\177ELF", 4)) {
+		pr_err("vDSO is not a valid ELF object!\n");
+		return -EINVAL;
+	}
+
+	vdso_lookup[arch_index].vdso_pages = (
+			vdso_lookup[arch_index].vdso_code_end -
+			vdso_lookup[arch_index].vdso_code_start) >>
+			PAGE_SHIFT;
+	pr_info("%s: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+		vdso_lookup[arch_index].name,
+		vdso_lookup[arch_index].vdso_pages + 1,
+		vdso_lookup[arch_index].vdso_pages,
+		vdso_lookup[arch_index].vdso_code_start, 1L, vdso_data);
+
+	/* Allocate the vDSO pagelist, plus a page for the data. */
+	vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1,
+				sizeof(struct page *),
+				GFP_KERNEL);
+	if (vdso_pagelist == NULL)
+		return -ENOMEM;
+
+	/* Grab the vDSO data page. */
+	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
+
+
+	/* Grab the vDSO code pages. */
+	pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);
+
+	for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++)
+		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
+
+	vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
+	vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];
+
+	return 0;
+}
+
+static int __setup_additional_pages(enum arch_vdso_type arch_index,
+				    struct mm_struct *mm,
+				    struct linux_binprm *bprm,
+				    int uses_interp)
+{
+	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+	void *ret;
+
+	vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
+	/* Be sure to map the data page */
+	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
+
+	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
+	if (IS_ERR_VALUE(vdso_base)) {
+		ret = ERR_PTR(vdso_base);
+		goto up_fail;
+	}
+
+	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+				       VM_READ|VM_MAYREAD,
+				       vdso_lookup[arch_index].dm);
+	if (IS_ERR(ret))
+		goto up_fail;
+
+	vdso_base += PAGE_SIZE;
+	mm->context.vdso = (void *)vdso_base;
+	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       vdso_lookup[arch_index].cm);
+	if (IS_ERR(ret))
+		goto up_fail;
+
+	return 0;
+
+up_fail:
+	mm->context.vdso = NULL;
+	return PTR_ERR(ret);
+}
+
 #ifdef CONFIG_COMPAT
 /*
  * Create and map the vectors page for AArch32 tasks.
@@ -62,7 +185,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
  * 1 - sigreturn code
  */
 static struct page *aarch32_vdso_pages[2] __ro_after_init;
-static const struct vm_special_mapping aarch32_vdso_spec[2] = {
+static struct vm_special_mapping aarch32_vdso_spec[2] __ro_after_init = {
 	{
 		/* Must be named [vectors] for compatibility with arm. */
 		.name	= "[vectors]",
@@ -202,15 +325,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 static int vdso_mremap(const struct vm_special_mapping *sm,
 		struct vm_area_struct *new_vma)
 {
-	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
-	unsigned long vdso_size = vdso_end - vdso_start;
-
-	if (vdso_size != new_size)
-		return -EINVAL;
-
-	current->mm->context.vdso = (void *)new_vma->vm_start;
-
-	return 0;
+	return __vdso_remap(ARM64_VDSO, sm, new_vma);
 }
 
 static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
@@ -225,39 +340,10 @@ static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
 
 static int __init vdso_init(void)
 {
-	int i;
-	struct page **vdso_pagelist;
-	unsigned long pfn;
+	vdso_lookup[ARM64_VDSO].dm = &vdso_spec[0];
+	vdso_lookup[ARM64_VDSO].cm = &vdso_spec[1];
 
-	if (memcmp(vdso_start, "\177ELF", 4)) {
-		pr_err("vDSO is not a valid ELF object!\n");
-		return -EINVAL;
-	}
-
-	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
-	pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
-		vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
-
-	/* Allocate the vDSO pagelist, plus a page for the data. */
-	vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
-				GFP_KERNEL);
-	if (vdso_pagelist == NULL)
-		return -ENOMEM;
-
-	/* Grab the vDSO data page. */
-	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
-
-
-	/* Grab the vDSO code pages. */
-	pfn = sym_to_pfn(vdso_start);
-
-	for (i = 0; i < vdso_pages; i++)
-		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
-
-	vdso_spec[0].pages = &vdso_pagelist[0];
-	vdso_spec[1].pages = &vdso_pagelist[1];
-
-	return 0;
+	return __vdso_init(ARM64_VDSO);
 }
 arch_initcall(vdso_init);
 
@@ -265,43 +351,19 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 				int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
-	void *ret;
-
-	vdso_text_len = vdso_pages << PAGE_SHIFT;
-	/* Be sure to map the data page */
-	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
+	int ret;
 
 	if (down_write_killable(&mm->mmap_sem))
 		return -EINTR;
-	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
-	if (IS_ERR_VALUE(vdso_base)) {
-		ret = ERR_PTR(vdso_base);
-		goto up_fail;
-	}
-	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
-				       VM_READ|VM_MAYREAD,
-				       &vdso_spec[0]);
-	if (IS_ERR(ret))
-		goto up_fail;
-
-	vdso_base += PAGE_SIZE;
-	mm->context.vdso = (void *)vdso_base;
-	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
-				       VM_READ|VM_EXEC|
-				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-				       &vdso_spec[1]);
-	if (IS_ERR(ret))
-		goto up_fail;
 
+	ret = __setup_additional_pages(ARM64_VDSO,
+				       mm,
+				       bprm,
+				       uses_interp);
 
 	up_write(&mm->mmap_sem);
-	return 0;
 
-up_fail:
-	mm->context.vdso = NULL;
-	up_write(&mm->mmap_sem);
-	return PTR_ERR(ret);
+	return ret;
 }
 
 #define VDSO_PRECISION_MASK	~(0xFF00ULL<<48)