@@ -38,15 +38,20 @@ extern char vdso32_start[], vdso32_end[];
#endif /* CONFIG_VDSO32 */
-#ifdef CONFIG_64BIT
-#define vdso_start vdso64_start
-#define vdso_end vdso64_end
-#define VDSO_SYMBOL VDSO64_SYMBOL
-#else /* CONFIG_64BIT */
-#define vdso_start vdso32_start
-#define vdso_end vdso32_end
-#define VDSO_SYMBOL VDSO32_SYMBOL
-#endif /* CONFIG_64BIT */
+#ifdef CONFIG_VDSO64ILP32
+#include <generated/vdso64ilp32-offsets.h>
+
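+/*
+ * Resolve a symbol in the 64ilp32 vDSO image: per-symbol offsets are
+ * generated into <generated/vdso64ilp32-offsets.h> at build time and
+ * are relative to the start of the mapped image.
+ */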
+#define VDSO64ILP32_SYMBOL(base, name) \
+ (void __user *)((unsigned long)(base) + rv64ilp32__vdso_##name##_offset)
+
+extern char vdso64ilp32_start[], vdso64ilp32_end[];
+
+#endif /* CONFIG_VDSO64ILP32 */
#endif /* !__ASSEMBLY__ */
@@ -182,7 +182,7 @@ static void __init_or_module _apply_alternatives(struct alt_entry *begin,
}
#ifdef CONFIG_MMU
-static void __init apply_vdso_alternatives(void)
+static void __init apply_vdso_alternatives(void *vdso_start)
{
const Elf_Ehdr *hdr;
const Elf_Shdr *shdr;
@@ -203,7 +203,7 @@ static void __init apply_vdso_alternatives(void)
RISCV_ALTERNATIVES_BOOT);
}
#else
-static void __init apply_vdso_alternatives(void) { }
+static void __init apply_vdso_alternatives(void *vdso_start) { }
#endif
void __init apply_boot_alternatives(void)
@@ -216,7 +216,19 @@ void __init apply_boot_alternatives(void)
(struct alt_entry *)__alt_end,
RISCV_ALTERNATIVES_BOOT);
- apply_vdso_alternatives();
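+ /*
+ * Each vDSO flavour built into the kernel carries its own
+ * .alternative section, so patch every image separately.
+ */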
+#ifdef CONFIG_VDSO64
+ apply_vdso_alternatives(vdso64_start);
+#endif
+#ifdef CONFIG_VDSO32
+ apply_vdso_alternatives(vdso32_start);
+#endif
+#ifdef CONFIG_VDSO64ILP32
+ apply_vdso_alternatives(vdso64ilp32_start);
+#endif
}
/*
@@ -345,10 +345,30 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
return -EFAULT;
/* Set up to return from userspace. */
-#ifdef CONFIG_MMU
- regs->ra = (unsigned long)VDSO_SYMBOL(
- current->mm->context.vdso, rt_sigreturn);
-#else
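+ /*
+ * Pick the sigreturn trampoline that matches the task's ABI:
+ * rv64 when TIF_32BIT is clear, rv32 when only TIF_32BIT is set,
+ * and 64ilp32 when TIF_32BIT and TIF_64ILP32 are both set.
+ */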
+#ifdef CONFIG_VDSO64
+ if (!test_thread_flag(TIF_32BIT))
+ regs->ra = (unsigned long)VDSO64_SYMBOL(
+ current->mm->context.vdso, rt_sigreturn);
+#endif /* CONFIG_VDSO64 */
+
+#ifdef CONFIG_VDSO32
+ if (test_thread_flag(TIF_32BIT) && !test_thread_flag(TIF_64ILP32))
+ regs->ra = (unsigned long)VDSO32_SYMBOL(
+ current->mm->context.vdso, rt_sigreturn);
+#endif /* CONFIG_VDSO32 */
+
+#ifdef CONFIG_VDSO64ILP32
+ if (test_thread_flag(TIF_32BIT) && test_thread_flag(TIF_64ILP32))
+ regs->ra = (unsigned long)VDSO64ILP32_SYMBOL(
+ current->mm->context.vdso, rt_sigreturn);
+#endif /* CONFIG_VDSO64ILP32 */
+
+#ifndef CONFIG_MMU
/*
* For the nommu case we don't have a VDSO. Instead we push two
* instructions to call the rt_sigreturn syscall onto the user stack.
@@ -50,9 +50,14 @@ struct __vdso_info {
struct vm_special_mapping *cm;
};
-static struct __vdso_info vdso_info;
-#ifdef CONFIG_COMPAT
-static struct __vdso_info compat_vdso_info;
+#ifdef CONFIG_VDSO64
+static struct __vdso_info vdso64_info;
+#endif
+#ifdef CONFIG_VDSO32
+static struct __vdso_info vdso32_info;
+#endif
+#ifdef CONFIG_VDSO64ILP32
+static struct __vdso_info vdso64ilp32_info;
#endif
static int vdso_mremap(const struct vm_special_mapping *sm,
@@ -114,10 +119,20 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
mmap_read_lock(mm);
for_each_vma(vmi, vma) {
- if (vma_is_special_mapping(vma, vdso_info.dm))
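+ /*
+ * Zap the data pages of whichever vDSO flavour is mapped
+ * so they are refaulted with the new namespace's clock data.
+ */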
+#ifdef CONFIG_VDSO64
+ if (vma_is_special_mapping(vma, vdso64_info.dm))
zap_vma_pages(vma);
-#ifdef CONFIG_COMPAT
- if (vma_is_special_mapping(vma, compat_vdso_info.dm))
+#endif
+#ifdef CONFIG_VDSO32
+ if (vma_is_special_mapping(vma, vdso32_info.dm))
+ zap_vma_pages(vma);
+#endif
+#ifdef CONFIG_VDSO64ILP32
+ if (vma_is_special_mapping(vma, vdso64ilp32_info.dm))
zap_vma_pages(vma);
#endif
}
@@ -172,13 +187,15 @@ static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
},
};
-static struct __vdso_info vdso_info __ro_after_init = {
+#ifdef CONFIG_VDSO64
+static struct __vdso_info vdso64_info __ro_after_init = {
.name = "vdso",
- .vdso_code_start = vdso_start,
- .vdso_code_end = vdso_end,
+ .vdso_code_start = vdso64_start,
+ .vdso_code_end = vdso64_end,
.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
};
+#endif
#ifdef CONFIG_COMPAT
static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
@@ -191,21 +208,50 @@ static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
.mremap = vdso_mremap,
},
};
+#endif
-static struct __vdso_info compat_vdso_info __ro_after_init = {
- .name = "compat_vdso",
+#ifdef CONFIG_VDSO32
+static struct __vdso_info vdso32_info __ro_after_init = {
+ .name = "vdso32",
.vdso_code_start = vdso32_start,
.vdso_code_end = vdso32_end,
+#ifdef CONFIG_64BIT
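+ /* On 64-bit kernels the 32-bit flavours use the compat mapping slots. */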
.dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
.cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
+#else
+ .dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
+ .cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
+#endif
+};
+#endif
+
+#ifdef CONFIG_VDSO64ILP32
+static struct __vdso_info vdso64ilp32_info __ro_after_init = {
+ .name = "vdso64ilp32",
+ .vdso_code_start = vdso64ilp32_start,
+ .vdso_code_end = vdso64ilp32_end,
+#ifdef CONFIG_64BIT
+ .dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
+ .cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
+#else
+ .dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
+ .cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
+#endif
};
#endif
static int __init vdso_init(void)
{
- __vdso_init(&vdso_info);
-#ifdef CONFIG_COMPAT
- __vdso_init(&compat_vdso_info);
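+ /* Validate each built vDSO image and set up its page array. */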
+#ifdef CONFIG_VDSO64
+ __vdso_init(&vdso64_info);
+#endif
+#ifdef CONFIG_VDSO32
+ __vdso_init(&vdso32_info);
+#endif
+#ifdef CONFIG_VDSO64ILP32
+ __vdso_init(&vdso64ilp32_info);
#endif
return 0;
@@ -265,8 +311,21 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
if (mmap_write_lock_killable(mm))
return -EINTR;
- ret = __setup_additional_pages(mm, bprm, uses_interp,
- &compat_vdso_info);
+ /* Don't leave ret uninitialized if no vDSO flavour below matches. */
+ ret = -EINVAL;
+
+#ifdef CONFIG_VDSO32
+ if (test_thread_flag(TIF_32BIT) && !test_thread_flag(TIF_64ILP32))
+ ret = __setup_additional_pages(mm, bprm, uses_interp,
+ &vdso32_info);
+#endif
+
+#ifdef CONFIG_VDSO64ILP32
+ if (test_thread_flag(TIF_32BIT) && test_thread_flag(TIF_64ILP32))
+ ret = __setup_additional_pages(mm, bprm, uses_interp,
+ &vdso64ilp32_info);
+#endif
+
mmap_write_unlock(mm);
return ret;
@@ -281,7 +340,24 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (mmap_write_lock_killable(mm))
return -EINTR;
- ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info);
+ /* Don't leave ret uninitialized if no vDSO flavour below matches. */
+ ret = -EINVAL;
+
+#ifdef CONFIG_VDSO64
+ if (!test_thread_flag(TIF_32BIT))
+ ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso64_info);
+#endif
+
+#ifdef CONFIG_VDSO32
+ if (test_thread_flag(TIF_32BIT) && !test_thread_flag(TIF_64ILP32))
+ ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso32_info);
+#endif
+
+#ifdef CONFIG_VDSO64ILP32
+ if (test_thread_flag(TIF_32BIT) && test_thread_flag(TIF_64ILP32))
+ ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso64ilp32_info);
+#endif
+
mmap_write_unlock(mm);
return ret;