
x86/tools: deal with 64-bit relative relocations for per-CPU symbols

Message ID 20190522174057.21770-1-ard.biesheuvel@arm.com (mailing list archive)
State New, archived
Series x86/tools: deal with 64-bit relative relocations for per-CPU symbols

Commit Message

Ard Biesheuvel May 22, 2019, 5:40 p.m. UTC
In order to fix an issue in the place-relative ksymtab code, we
need to switch to 64-bit place-relative references, which
require special handling in the x86 'relocs' tool. The reason
is that per-CPU symbols on x86_64 live in a separate link-time
section, whose load-time address is not reflected in the ELF
metadata, and so relative references emitted by the toolchain
are guaranteed to be wrong.

Fix this by extending the existing handling of 32-bit relative
references to per-CPU variables so that it covers 64-bit
relative references as well.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@arm.com>
---
This is a follow-up to [0] and a prerequisite for the change
implemented there: using 64-bit relative references on x86_64
requires this handling in the 'relocs' tool and in the decompressor.

[0] https://lore.kernel.org/linux-arm-kernel/20190522150239.19314-1-ard.biesheuvel@arm.com

This patch plus [0] has been build and boot tested with x86_64_defconfig on QEMU/KVM + OVMF.

 arch/x86/boot/compressed/misc.c | 12 ++++++++++++
 arch/x86/tools/relocs.c         | 15 ++++++++++-----
 2 files changed, 22 insertions(+), 5 deletions(-)
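
To make the asymmetry described in the commit message concrete, here is a
minimal C sketch (an editorial illustration only; apply_fixups(), link_addr
and load_addr are made-up names, not kernel code) of why an ordinary 64-bit
absolute reference gets the load-time delta added, while a place-relative
reference to a per-CPU symbol needs that same delta subtracted, mirroring
the '-= delta' fixup the decompressor change below applies:

#include <stdint.h>

/*
 * Illustrative only: 'abs_ref' stands for a location holding a 64-bit
 * absolute address, 'pcpu_rel_ref' for a location holding a place-relative
 * (S + A - P) reference to a per-CPU symbol whose link-time (zero-based)
 * address does not move with the rest of the image.
 */
static void apply_fixups(int64_t *abs_ref, int64_t *pcpu_rel_ref,
			 unsigned long link_addr, unsigned long load_addr)
{
	unsigned long delta = load_addr - link_addr;

	/* Ordinary absolute reference: the target moved by delta, so add it. */
	*abs_ref += delta;

	/*
	 * Place-relative reference to a per-CPU symbol: the target S keeps
	 * its link-time address while the referring location P moved up by
	 * delta, so the linker-recorded value S + A - P is now too large by
	 * delta and must be reduced.
	 */
	*pcpu_rel_ref -= delta;
}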

Comments

Josh Poimboeuf May 24, 2019, 5:42 p.m. UTC | #1
On Wed, May 22, 2019 at 06:40:57PM +0100, Ard Biesheuvel wrote:
> In order to fix an issue in the place-relative ksymtab code, we
> need to switch to 64-bit place-relative references, which
> require special handling in the x86 'relocs' tool. The reason
> is that per-CPU symbols on x86_64 live in a separate link-time
> section, whose load-time address is not reflected in the ELF
> metadata, and so relative references emitted by the toolchain
> are guaranteed to be wrong.
> 
> Fix this by extending the existing handling of 32-bit relative
> references to per-CPU variables so that it covers 64-bit
> relative references as well.
> 
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@arm.com>
> ---
> This is a follow-up to [0] and a prerequisite for the change
> implemented there: using 64-bit relative references on x86_64
> requires this handling in the 'relocs' tool and in the decompressor.
> 
> [0] https://lore.kernel.org/linux-arm-kernel/20190522150239.19314-1-ard.biesheuvel@arm.com
> 
> This patch plus [0] has been build and boot tested with x86_64_defconfig on QEMU/KVM + OVMF.

NACK based on

https://lkml.kernel.org/r/f2141ee5-d07a-6dd9-47c6-97e8fbdccf34@arm.com

Patch

diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 5a237e8dbf8d..e089d78bd86a 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -218,6 +218,8 @@  static void handle_relocations(void *output, unsigned long output_len,
 	 * Format is:
 	 *
 	 * kernel bits...
+	 * 0 - zero terminator for inverse 64 bit relocations
+	 * 64 bit inverse relocation repeated
 	 * 0 - zero terminator for 64 bit relocations
 	 * 64 bit relocation repeated
 	 * 0 - zero terminator for inverse 32 bit relocations
@@ -258,6 +260,16 @@  static void handle_relocations(void *output, unsigned long output_len,
 
 		*(uint64_t *)ptr += delta;
 	}
+	while (*--reloc) {
+		long extended = *reloc;
+		extended += map;
+
+		ptr = (unsigned long)extended;
+		if (ptr < min_addr || ptr > max_addr)
+			error("inverse 64-bit relocation outside of kernel!\n");
+
+		*(uint64_t *)ptr -= delta;
+	}
 #endif
 }
 #else
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index ce7188cbdae5..d6a2bb90dfa6 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -26,6 +26,7 @@  static struct relocs relocs32;
 #if ELF_BITS == 64
 static struct relocs relocs32neg;
 static struct relocs relocs64;
+static struct relocs relocs64neg;
 #endif
 
 struct section {
@@ -800,12 +801,8 @@  static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
 		break;
 
 	case R_X86_64_PC64:
-		/*
-		 * Only used by jump labels
-		 */
 		if (is_percpu_sym(sym, symname))
-			die("Invalid R_X86_64_PC64 relocation against per-CPU symbol %s\n",
-			    symname);
+			add_reloc(&relocs64neg, offset);
 		break;
 
 	case R_X86_64_32:
@@ -1027,6 +1024,7 @@  static void emit_relocs(int as_text, int use_real_mode)
 #if ELF_BITS == 64
 	sort_relocs(&relocs32neg);
 	sort_relocs(&relocs64);
+	sort_relocs(&relocs64neg);
 #else
 	sort_relocs(&relocs16);
 #endif
@@ -1054,6 +1052,13 @@  static void emit_relocs(int as_text, int use_real_mode)
 		/* Print a stop */
 		write_reloc(0, stdout);
 
+		/* Now print each inverse 64-bit relocation */
+		for (i = 0; i < relocs64neg.count; i++)
+			write_reloc(relocs64neg.offset[i], stdout);
+
+		/* Print a stop */
+		write_reloc(0, stdout);
+
 		/* Now print each relocation */
 		for (i = 0; i < relocs64.count; i++)
 			write_reloc(relocs64.offset[i], stdout);
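
As an editorial aside on the table format (a sketch under assumptions, not
part of the patch): emit_relocs() writes each class of relocation offsets
as a run of values followed by a single zero 'stop' entry, and
handle_relocations() walks the appended table backwards with
while (*--reloc), so each new group only needs its own terminating zero.
A hypothetical stand-in for that emit pattern could look like:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper (not taken from relocs.c): write one zero-terminated
 * group of relocation offsets, assumed to be 32-bit entries, so that a
 * consumer walking the table backwards, as the decompressor loop above
 * does, stops at the 0 entry.
 */
static void emit_group(const uint32_t *offsets, int count, FILE *out)
{
	uint32_t stop = 0;
	int i;

	for (i = 0; i < count; i++)
		fwrite(&offsets[i], sizeof(offsets[i]), 1, out);

	/* The terminator the backwards walk stops on. */
	fwrite(&stop, sizeof(stop), 1, out);
}

With the new group emitted just before the 64-bit relocations, the
decompressor (walking backwards from the end of the table) processes the
32-bit relocations, the inverse 32-bit relocations, the 64-bit relocations
and finally the new inverse 64-bit relocations, which matches the format
comment added to misc.c above.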