--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -8,5 +8,6 @@ generated-y += xen-hypercalls.h
generic-y += dma-contiguous.h
generic-y += early_ioremap.h
+generic-y += export.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
deleted file mode 100644
--- a/arch/x86/include/asm/export.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_64BIT
-#define KSYM_ALIGN 16
-#endif
-#include <asm-generic/export.h>
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -5,12 +5,10 @@
#define KSYM_FUNC(x) x
#endif
#ifdef CONFIG_64BIT
-#define __put .quad
#ifndef KSYM_ALIGN
#define KSYM_ALIGN 8
#endif
#else
-#define __put .long
#ifndef KSYM_ALIGN
#define KSYM_ALIGN 4
#endif
@@ -19,6 +17,16 @@
#define KCRC_ALIGN 4
#endif
+.macro __put, val, name
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+ .long \val - ., \name - .
+#elif defined(CONFIG_64BIT)
+ .quad \val, \name
+#else
+ .long \val, \name
+#endif
+.endm
+
/*
* note on .section use: @progbits vs %progbits nastiness doesn't matter,
* since we immediately emit into those sections anyway.
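What the new __put buys can be seen by comparing the two entry encodings directly: each invocation emits a (value, name) pair, and with CONFIG_HAVE_ARCH_PREL32_RELOCATIONS that pair is two place-relative 32-bit words instead of two absolute machine words, so a 64-bit entry shrinks from 16 bytes to 8 and carries no absolute relocations for the boot-time relocator to patch. A minimal userspace C sketch of the size difference (struct names are illustrative stand-ins, not kernel types):

#include <stdio.h>

/* Illustrative stand-ins for the two encodings, not kernel types */
struct entry_abs {	/* __put as ".quad val, name" (64-bit absolute) */
	unsigned long value;
	const char *name;
};

struct entry_rel {	/* __put as ".long val - ., name - ." */
	int value_offset;
	int name_offset;
};

int main(void)
{
	/* LP64: 16 bytes plus two absolute relocations per entry,
	 * versus 8 bytes and no runtime relocations at all.
	 */
	printf("absolute: %zu bytes, relative: %zu bytes\n",
	       sizeof(struct entry_abs), sizeof(struct entry_rel));
	return 0;
}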
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -280,6 +280,25 @@ unsigned long read_word_at_a_time(const void *addr)
#endif /* __KERNEL__ */
+/*
+ * Force the compiler to emit 'sym' as a symbol, so that we can reference
+ * it from inline assembler. Necessary in case 'sym' could be inlined
+ * otherwise, or eliminated entirely due to lack of references that are
+ * visible to the compiler.
+ */
+#define __ADDRESSABLE(sym) \
+ static void * __attribute__((section(".discard.addressable"), used)) \
+ __PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
+
+/**
+ * offset_to_ptr - convert a relative memory offset to an absolute pointer
+ * @off: the address of the 32-bit offset value
+ */
+static inline void *offset_to_ptr(const int *off)
+{
+ return (void *)((unsigned long)off + *off);
+}
+
#endif /* __ASSEMBLY__ */
#ifndef __optimize
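The offset_to_ptr() helper just inverts what the assembler stores: an entry holds `sym - .`, the distance from the entry itself to the target, so adding the stored offset back to the entry's own address recovers the absolute pointer. A standalone sketch of the round trip (here the offset is computed at run time; in the kernel the assembler/linker does it via ".long sym - ."):

#include <stdio.h>

static int target = 42;
static int rel_off;	/* will hold the distance from itself to 'target' */

/* Same helper as the one added to <linux/compiler.h> above */
static void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}

int main(void)
{
	/* What ".long target - ." would have the assembler store */
	rel_off = (int)((unsigned long)&target - (unsigned long)&rel_off);
	printf("%d\n", *(int *)offset_to_ptr(&rel_off));	/* prints 42 */
	return 0;
}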
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -18,12 +18,6 @@
#define VMLINUX_SYMBOL_STR(x) __VMLINUX_SYMBOL_STR(x)
#ifndef __ASSEMBLY__
-struct kernel_symbol
-{
- unsigned long value;
- const char *name;
-};
-
#ifdef MODULE
extern struct module __this_module;
#define THIS_MODULE (&__this_module)
@@ -54,17 +48,47 @@ extern struct module __this_module;
#define __CRC_SYMBOL(sym, sec)
#endif
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+#include <linux/compiler.h>
+/*
+ * Emit the ksymtab entry as a pair of relative references: this reduces
+ * the size by half on 64-bit architectures, and eliminates the need for
+ * absolute relocations that require runtime processing on relocatable
+ * kernels.
+ */
+#define __KSYMTAB_ENTRY(sym, sec) \
+ __ADDRESSABLE(sym) \
+ asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
+ " .balign 8 \n" \
+ "__ksymtab_" #sym ": \n" \
+ " .long " #sym "- . \n" \
+ " .long __kstrtab_" #sym "- . \n" \
+ " .previous \n")
+
+struct kernel_symbol {
+ int value_offset;
+ int name_offset;
+};
+#else
+#define __KSYMTAB_ENTRY(sym, sec) \
+ static const struct kernel_symbol __ksymtab_##sym \
+ __attribute__((section("___ksymtab" sec "+" #sym), used)) \
+ = { (unsigned long)&sym, __kstrtab_##sym }
+
+struct kernel_symbol {
+ unsigned long value;
+ const char *name;
+};
+#endif
+
/* For every exported symbol, place a struct in the __ksymtab section */
#define ___EXPORT_SYMBOL(sym, sec) \
extern typeof(sym) sym; \
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
- __attribute__((section("__ksymtab_strings"), aligned(1))) \
+ __attribute__((section("__ksymtab_strings"), used, aligned(1))) \
= #sym; \
- static const struct kernel_symbol __ksymtab_##sym \
- __used \
- __attribute__((section("___ksymtab" sec "+" #sym), used)) \
- = { (unsigned long)&sym, __kstrtab_##sym }
+ __KSYMTAB_ENTRY(sym, sec)
#if defined(__DISABLE_EXPORTS)
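The PREL32 flavour of __KSYMTAB_ENTRY can be exercised as a plain userspace program, which also shows why the __ADDRESSABLE() marker matters: the only references to the exported symbol and its name string live inside the asm block, so without __ADDRESSABLE (and the used attribute on the string) the compiler would be free to discard them. A sketch assuming GCC/binutils on a 64-bit ELF target; the section and symbol names are demo stand-ins, not the kernel's ___ksymtab/__kstrtab names:

/* Build: gcc -O2 demo.c && ./a.out  (prints "answer = 42") */
#include <stdio.h>

#define ___PASTE(a, b)	a##b
#define __PASTE(a, b)	___PASTE(a, b)

/* As in the patch: keep 'sym' emitted even though the compiler sees
 * no C-level reference to it (all references live in the asm below).
 */
#define __ADDRESSABLE(sym) \
	static void * __attribute__((section(".discard.addressable"), used)) \
	__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;

struct kernel_symbol {		/* the PREL32 layout from the patch */
	int value_offset;
	int name_offset;
};

static int answer = 42;		/* the "exported" symbol */
__ADDRESSABLE(answer)

static const char kstrtab_answer[] __attribute__((used)) = "answer";

/* Hand-rolled equivalent of the __KSYMTAB_ENTRY() asm, using a demo
 * section name; ".long sym - ." makes the assembler store
 * place-relative offsets, so the entry holds no absolute addresses.
 */
asm("	.pushsection \"demo_ksymtab\", \"a\"\n"
    "	.balign 8\n"
    "ksymtab_answer:\n"
    "	.long answer - .\n"
    "	.long kstrtab_answer - .\n"
    "	.popsection\n");

extern const struct kernel_symbol ksymtab_answer;

/* Same as the helper added to <linux/compiler.h> */
static void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}

int main(void)
{
	printf("%s = %d\n",
	       (const char *)offset_to_ptr(&ksymtab_answer.name_offset),
	       *(int *)offset_to_ptr(&ksymtab_answer.value_offset));
	return 0;
}

Note that ".long sym - ." resolves to a link-time constant, which is exactly why these entries survive kernel relocation (e.g. KASLR) without any runtime fixups.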
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -547,12 +547,30 @@ static bool check_symbol(const struct symsearch *syms,
return true;
}
+static unsigned long kernel_symbol_value(const struct kernel_symbol *sym)
+{
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+ return (unsigned long)offset_to_ptr(&sym->value_offset);
+#else
+ return sym->value;
+#endif
+}
+
+static const char *kernel_symbol_name(const struct kernel_symbol *sym)
+{
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+ return offset_to_ptr(&sym->name_offset);
+#else
+ return sym->name;
+#endif
+}
+
static int cmp_name(const void *va, const void *vb)
{
const char *a;
const struct kernel_symbol *b;
a = va; b = vb;
- return strcmp(a, b->name);
+ return strcmp(a, kernel_symbol_name(b));
}
static bool find_symbol_in_section(const struct symsearch *syms,
@@ -2192,7 +2210,7 @@ void *__symbol_get(const char *symbol)
sym = NULL;
preempt_enable();
- return sym ? (void *)sym->value : NULL;
+ return sym ? (void *)kernel_symbol_value(sym) : NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);
@@ -2222,10 +2240,12 @@ static int verify_export_symbols(struct module *mod)
for (i = 0; i < ARRAY_SIZE(arr); i++) {
for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
- if (find_symbol(s->name, &owner, NULL, true, false)) {
+ if (find_symbol(kernel_symbol_name(s), &owner, NULL,
+ true, false)) {
pr_err("%s: exports duplicate symbol %s"
" (owned by %s)\n",
- mod->name, s->name, module_name(owner));
+ mod->name, kernel_symbol_name(s),
+ module_name(owner));
return -ENOEXEC;
}
}
@@ -2274,7 +2294,7 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
ksym = resolve_symbol_wait(mod, info, name);
/* Ok if resolved. */
if (ksym && !IS_ERR(ksym)) {
- sym[i].st_value = ksym->value;
+ sym[i].st_value = kernel_symbol_value(ksym);
break;
}
@@ -2534,7 +2554,7 @@ static int is_exported(const char *name, unsigned long value,
ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
else
ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
- return ks != NULL && ks->value == value;
+ return ks != NULL && kernel_symbol_value(ks) == value;
}
/* As per nm */