[RFC,3/6] kvm: arm64: Fix up RELR relocation in hyp code/data

Message ID 20201119162543.78001-4-dbrazdil@google.com
State New, archived
Series kvm: arm64: Fix up hyp relocations

Commit Message

David Brazdil Nov. 19, 2020, 4:25 p.m. UTC
The arm64 kernel also supports packing of relocation data using the RELR
format. Implement a parser of RELR data and fix up the relocations using
the same infrastructure as the RELA relocs.

Signed-off-by: David Brazdil <dbrazdil@google.com>
---
 arch/arm64/kvm/va_layout.c | 41 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)
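
For background: RELR encodes relative relocations as a stream of 64-bit
entries. An entry with a clear LSB is the absolute offset of a relocated
word; an entry with a set LSB is a bitmap whose bits 1..63 mark which of
the 63 words following the last offset also need fixing up. A minimal
standalone decoder sketch (hypothetical names, kernel-style u64/BIT()
helpers assumed; not part of the patch) illustrates the format:

	static void apply_relr(const u64 *relr, size_t nentries,
			       void (*fixup)(u64 offset))
	{
		u64 base = 0;
		size_t i;

		for (i = 0; i < nentries; i++) {
			u64 entry = relr[i];

			if (!(entry & 1)) {
				/* Offset entry: fix up this word directly. */
				fixup(entry);
				base = entry + 8;	/* first word after it */
			} else {
				/* Bitmap entry: bit n marks base + 8*(n-1). */
				unsigned int n;

				for (n = 1; n < 64; n++)
					if (entry & BIT(n))
						fixup(base + 8 * (n - 1));
				base += 63 * 8;	/* bitmap covers 63 words */
			}
		}
	}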

Comments

Marc Zyngier Nov. 24, 2020, 1:24 p.m. UTC | #1
On 2020-11-19 16:25, David Brazdil wrote:
> The arm64 kernel also supports packing of relocation data using the 
> RELR
> format. Implement a parser of RELR data and fixup the relocations using
> the same infra as RELA relocs.
> 
> Signed-off-by: David Brazdil <dbrazdil@google.com>
> ---
>  arch/arm64/kvm/va_layout.c | 41 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 41 insertions(+)
> 
> diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
> index b80fab974896..7f45a98eacfd 100644
> --- a/arch/arm64/kvm/va_layout.c
> +++ b/arch/arm64/kvm/va_layout.c
> @@ -145,6 +145,43 @@ static void __fixup_hyp_rela(void)
>  		__fixup_hyp_rel(rel[i].r_offset);
>  }
> 
> +#ifdef CONFIG_RELR
> +static void __fixup_hyp_relr(void)
> +{
> +	u64 *rel, *end;
> +
> +	rel = (u64*)(kimage_vaddr + __load_elf_u64(__relr_offset));
> +	end = rel + (__load_elf_u64(__relr_size) / sizeof(*rel));
> +
> +	while (rel < end) {
> +		unsigned n;
> +		u64 addr = *(rel++);
> +
> +		/* Address must not have the LSB set. */
> +		BUG_ON(addr & BIT(0));
> +
> +		/* Fix up the first address of the chain. */
> +		__fixup_hyp_rel(addr);
> +
> +		/*
> +		 * Loop over bitmaps, i.e. as long as words' LSB is 1.
> +		 * Each bit (ordered from LSB to MSB) represents one word from
> +		 * the last full address (exclusive). If the corresponding bit
> +		 * is 1, there is a relative relocation on that word.
> +		 */

What is the endianness of this bitmap? Is it guaranteed to be in
CPU-endian format?

> +		for (n = 0; rel < end && (*rel & BIT(0)); n++) {
> +			unsigned i;
> +			u64 bitmap = *(rel++);

nit: if you change this u64 for an unsigned long...

> +
> +			for (i = 1; i < 64; ++i) {
> +				if ((bitmap & BIT(i)))
> +					__fixup_hyp_rel(addr + 8 * (63 * n + i));
> +			}

... this can be written as:

         i = 1;
         for_each_set_bit_from(i, &bitmap, 64)
                 __fixup_hyp_rel(addr + 8 * (63 * n + i));
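
For reference, for_each_set_bit_from() (from <linux/bitops.h>) resumes
scanning at the current index and takes a const unsigned long * bitmap,
so the loop-local declarations would become something like:

	unsigned long i = 1;
	unsigned long bitmap = *(rel++);

unsigned long and u64 are both 64 bits wide on arm64, so the values are
unchanged.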

> +		}
> +	}
> +}
> +#endif
> +
>  /*
>   * The kernel relocated pointers to kernel VA. Iterate over 
> relocations in
>   * the hypervisor ELF sections and convert them to hyp VA. This avoids 
> the
> @@ -156,6 +193,10 @@ __init void kvm_fixup_hyp_relocations(void)
>  		return;
> 
>  	__fixup_hyp_rela();
> +
> +#ifdef CONFIG_RELR
> +	__fixup_hyp_relr();
> +#endif
>  }
> 
>  static u32 compute_instruction(int n, u32 rd, u32 rn)

Thanks,

         M.
Ard Biesheuvel Nov. 24, 2020, 2:02 p.m. UTC | #2
On Thu, 19 Nov 2020 at 17:25, David Brazdil <dbrazdil@google.com> wrote:
>
> The arm64 kernel also supports packing of relocation data using the RELR
> format. Implement a parser of RELR data and fixup the relocations using
> the same infra as RELA relocs.
>
> Signed-off-by: David Brazdil <dbrazdil@google.com>
> ---
>  arch/arm64/kvm/va_layout.c | 41 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 41 insertions(+)
>
> diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
> index b80fab974896..7f45a98eacfd 100644
> --- a/arch/arm64/kvm/va_layout.c
> +++ b/arch/arm64/kvm/va_layout.c
> @@ -145,6 +145,43 @@ static void __fixup_hyp_rela(void)
>                 __fixup_hyp_rel(rel[i].r_offset);
>  }
>
> +#ifdef CONFIG_RELR

Please prefer IS_ENABLED() [below] if the code in question can compile
(but perhaps not link) correctly when the symbol is not set.
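
That is, assuming the function definition itself is made visible to the
compiler unconditionally, the call site could look something like:

	if (IS_ENABLED(CONFIG_RELR))
		__fixup_hyp_relr();

The compiler still type-checks the body either way, and the call (and
the then-unused static function) is discarded when the option is off.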

> +static void __fixup_hyp_relr(void)

__init ?

> +{
> +       u64 *rel, *end;
> +
> +       rel = (u64*)(kimage_vaddr + __load_elf_u64(__relr_offset));
> +       end = rel + (__load_elf_u64(__relr_size) / sizeof(*rel));
> +

The reason for this little dance with the offset and size is that the
initial relocation routine runs from the ID mapping, but the
relocation fixups are performed via the kernel's VA mapping, as the
ID mapping does not cover the entire image. So simple adrp/add
pairs aren't suitable there.

In this case (as well as in the previous patch, btw), that problem
does not exist, and so I think we should be able to simply define
start and end markers inside the .rela sections, and reference them
here as symbols with external linkage (which ensures that they are
referenced relatively, although you could add in a
__attribute__((visibility("hidden"))) for good measure).
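
As a sketch of that approach (the marker names below are invented for
illustration, not taken from the tree), the linker script would bracket
the relocation section:

	.rela.dyn : {
		__rela_hyp_start = .;
		*(.rela .rela*)
		__rela_hyp_end = .;
	}

and the C code would walk the markers directly:

	extern const Elf64_Rela __rela_hyp_start[]
		__attribute__((visibility("hidden")));
	extern const Elf64_Rela __rela_hyp_end[]
		__attribute__((visibility("hidden")));
	const Elf64_Rela *rela;

	for (rela = __rela_hyp_start; rela < __rela_hyp_end; rela++)
		__fixup_hyp_rel(rela->r_offset);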

> +       while (rel < end) {
> +               unsigned n;
> +               u64 addr = *(rel++);

Parens are redundant here (and below)

> +
> +               /* Address must not have the LSB set. */
> +               BUG_ON(addr & BIT(0));
> +
> +               /* Fix up the first address of the chain. */
> +               __fixup_hyp_rel(addr);
> +
> +               /*
> +                * Loop over bitmaps, i.e. as long as words' LSB is 1.
> +                * Each bit (ordered from LSB to MSB) represents one word from
> +                * the last full address (exclusive). If the corresponding bit
> +                * is 1, there is a relative relocation on that word.
> +                */
> +               for (n = 0; rel < end && (*rel & BIT(0)); n++) {
> +                       unsigned i;
> +                       u64 bitmap = *(rel++);
> +
> +                       for (i = 1; i < 64; ++i) {
> +                               if ((bitmap & BIT(i)))
> +                                       __fixup_hyp_rel(addr + 8 * (63 * n + i));
> +                       }
> +               }
> +       }
> +}
> +#endif
> +
>  /*
>   * The kernel relocated pointers to kernel VA. Iterate over relocations in
>   * the hypervisor ELF sections and convert them to hyp VA. This avoids the
> @@ -156,6 +193,10 @@ __init void kvm_fixup_hyp_relocations(void)
>                 return;
>
>         __fixup_hyp_rela();
> +
> +#ifdef CONFIG_RELR
> +       __fixup_hyp_relr();
> +#endif
>  }
>
>  static u32 compute_instruction(int n, u32 rd, u32 rn)
> --
> 2.29.2.299.gdc1121823c-goog
>

Patch

diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index b80fab974896..7f45a98eacfd 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -145,6 +145,43 @@  static void __fixup_hyp_rela(void)
 		__fixup_hyp_rel(rel[i].r_offset);
 }
 
+#ifdef CONFIG_RELR
+static void __fixup_hyp_relr(void)
+{
+	u64 *rel, *end;
+
+	rel = (u64*)(kimage_vaddr + __load_elf_u64(__relr_offset));
+	end = rel + (__load_elf_u64(__relr_size) / sizeof(*rel));
+
+	while (rel < end) {
+		unsigned n;
+		u64 addr = *(rel++);
+
+		/* Address must not have the LSB set. */
+		BUG_ON(addr & BIT(0));
+
+		/* Fix up the first address of the chain. */
+		__fixup_hyp_rel(addr);
+
+		/*
+		 * Loop over bitmaps, i.e. as long as words' LSB is 1.
+		 * Each bit (ordered from LSB to MSB) represents one word from
+		 * the last full address (exclusive). If the corresponding bit
+		 * is 1, there is a relative relocation on that word.
+		 */
+		for (n = 0; rel < end && (*rel & BIT(0)); n++) {
+			unsigned i;
+			u64 bitmap = *(rel++);
+
+			for (i = 1; i < 64; ++i) {
+				if ((bitmap & BIT(i)))
+					__fixup_hyp_rel(addr + 8 * (63 * n + i));
+			}
+		}
+	}
+}
+#endif
+
 /*
  * The kernel relocated pointers to kernel VA. Iterate over relocations in
  * the hypervisor ELF sections and convert them to hyp VA. This avoids the
@@ -156,6 +193,10 @@  __init void kvm_fixup_hyp_relocations(void)
 		return;
 
 	__fixup_hyp_rela();
+
+#ifdef CONFIG_RELR
+	__fixup_hyp_relr();
+#endif
 }
 
 static u32 compute_instruction(int n, u32 rd, u32 rn)