diff mbox

[Very,RFC,2/3] insn: Add aarch64_insn_gen_branch_imm to generate branch

Message ID 1470716340-24474-3-git-send-email-konrad.wilk@oracle.com (mailing list archive)
State New, archived
Headers show

Commit Message

Konrad Rzeszutek Wilk Aug. 9, 2016, 4:18 a.m. UTC
We copied from Linux code (v4.7) the code that generates
the unconditional branch instructions. This is mostly
from Linux commit c0cafbae20d2878883ec3c06d6ea30ff38a6bf92
"arm64: introduce aarch64_insn_gen_branch_reg()"

However, the branch-link instruction was omitted; only
the branch instruction is generated.

This lays the groundwork for Livepatch to generate the
trampoline to jump to the new replacement function.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
--
Cc: Ross Lagerwall <ross.lagerwall@citrix.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Julien Grall <julien.grall@arm.com>
---
 xen/arch/arm/arm64/insn.c        | 41 ++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/insn.h |  2 ++
 2 files changed, 43 insertions(+)

Comments

Julien Grall Aug. 11, 2016, 4:03 p.m. UTC | #1
Hi Konrad,

On 09/08/2016 06:18, Konrad Rzeszutek Wilk wrote:
> We copied from Linux code (v4.7) the code that generates
> the unconditional branch instructions. This is mostly
> from Linux commit c0cafbae20d2878883ec3c06d6ea30ff38a6bf92
> "arm64: introduce aarch64_insn_gen_branch_reg()"
>
> However the branch-link instructions was omitted, only
> the branch instruction is generated.

I would prefer to have the function fully copied (i.e. with the 
branch-link instructions). This will make it easier to re-sync the code 
later on.

>
> This lays the groundwork for Livepatch to generate the
> trampoline to jump to the new replacement function.
>
> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> --
> Cc: Ross Lagerwall <ross.lagerwall@citrix.com>
> Cc: Stefano Stabellini <sstabellini@kernel.org>
> Cc: Julien Grall <julien.grall@arm.com>
> ---
>  xen/arch/arm/arm64/insn.c        | 41 ++++++++++++++++++++++++++++++++++++++++
>  xen/include/asm-arm/arm64/insn.h |  2 ++
>  2 files changed, 43 insertions(+)
>
> diff --git a/xen/arch/arm/arm64/insn.c b/xen/arch/arm/arm64/insn.c
> index 12b4d96..15dd7bc 100644
> --- a/xen/arch/arm/arm64/insn.c
> +++ b/xen/arch/arm/arm64/insn.c
> @@ -209,6 +209,47 @@ u32 aarch64_set_branch_offset(u32 insn, s32 offset)
>  	BUG();
>  }
>
> +static inline long branch_imm_common(unsigned long pc, unsigned long addr,
> +				     long range)
> +{
> +	long offset;
> +
> +	if ((pc & 0x3) || (addr & 0x3)) {
> +		pr_err("%s: A64 instructions must be word aligned\n", __func__);
> +		return range;
> +	}
> +
> +	offset = ((long)addr - (long)pc);
> +
> +	if (offset < -range || offset >= range) {
> +		pr_err("%s: offset out of range\n", __func__);
> +		return range;
> +	}
> +
> +	return offset;
> +}
> +
> +u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr)
> +{
> +	u32 insn;
> +	long offset;
> +
> +	/*
> +	 * B/BL support [-128M, 128M) offset
> +	 * ARM64 virtual address arrangement guarantees all kernel and module
> +	 * texts are within +/-128M.
> +	 */
> +	offset = branch_imm_common(pc, addr, SZ_128M);
> +	if (offset >= SZ_128M)
> +		return AARCH64_BREAK_FAULT;
> +
> +	insn = aarch64_insn_get_b_value();
> +
> +	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
> +					     offset >> 2);
> +}

I would prefer to have the functions added at the same place as in Linux 
for a similar reason as stated previously.

> +
> +

I think you can drop one empty line here.

>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/include/asm-arm/arm64/insn.h b/xen/include/asm-arm/arm64/insn.h
> index 6ce37be..378ba11 100644
> --- a/xen/include/asm-arm/arm64/insn.h
> +++ b/xen/include/asm-arm/arm64/insn.h
> @@ -61,6 +61,8 @@ u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
>  s32 aarch64_get_branch_offset(u32 insn);
>  u32 aarch64_set_branch_offset(u32 insn, s32 offset);
>
> +u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr);
> +
>  /* Wrapper for common code */
>  static inline bool insn_is_branch_imm(u32 insn)
>  {
>

Regards,
diff mbox

Patch

diff --git a/xen/arch/arm/arm64/insn.c b/xen/arch/arm/arm64/insn.c
index 12b4d96..15dd7bc 100644
--- a/xen/arch/arm/arm64/insn.c
+++ b/xen/arch/arm/arm64/insn.c
@@ -209,6 +209,47 @@  u32 aarch64_set_branch_offset(u32 insn, s32 offset)
 	BUG();
 }
 
+static inline long branch_imm_common(unsigned long pc, unsigned long addr,
+				     long range)
+{
+	long offset;
+
+	if ((pc & 0x3) || (addr & 0x3)) {
+		pr_err("%s: A64 instructions must be word aligned\n", __func__);
+		return range;
+	}
+
+	offset = ((long)addr - (long)pc);
+
+	if (offset < -range || offset >= range) {
+		pr_err("%s: offset out of range\n", __func__);
+		return range;
+	}
+
+	return offset;
+}
+
+u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr)
+{
+	u32 insn;
+	long offset;
+
+	/*
+	 * B/BL support [-128M, 128M) offset
+	 * ARM64 virtual address arrangement guarantees all kernel and module
+	 * texts are within +/-128M.
+	 */
+	offset = branch_imm_common(pc, addr, SZ_128M);
+	if (offset >= SZ_128M)
+		return AARCH64_BREAK_FAULT;
+
+	insn = aarch64_insn_get_b_value();
+
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+					     offset >> 2);
+}
+
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-arm/arm64/insn.h b/xen/include/asm-arm/arm64/insn.h
index 6ce37be..378ba11 100644
--- a/xen/include/asm-arm/arm64/insn.h
+++ b/xen/include/asm-arm/arm64/insn.h
@@ -61,6 +61,8 @@  u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 s32 aarch64_get_branch_offset(u32 insn);
 u32 aarch64_set_branch_offset(u32 insn, s32 offset);
 
+u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr);
+
 /* Wrapper for common code */
 static inline bool insn_is_branch_imm(u32 insn)
 {