[bpf-next,3/3] bpf, arm64: use bpf_jit_binary_pack_alloc

Message ID 20230605074024.1055863-4-puranjay12@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Series bpf, arm64: use BPF prog pack allocator in BPF JIT

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-22 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-29 success Logs for veristat
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 8 this patch: 8
netdev/cc_maintainers warning 7 maintainers not CCed: yhs@fb.com john.fastabend@gmail.com sdf@google.com will@kernel.org zlim.lnx@gmail.com jolsa@kernel.org haoluo@google.com
netdev/build_clang success Errors and warnings before: 8 this patch: 8
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 8 this patch: 8
netdev/checkpatch warning CHECK: Comparison to NULL could be written "!aarch64_insn_copy" WARNING: line length of 81 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-16 success Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-12 fail Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ${{ matrix.test }} on ${{ matrix.arch }} with ${{ matrix.toolchain_full }}
bpf/vmtest-bpf-next-VM_Test-2 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 fail Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-7 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-8 success Logs for veristat

Commit Message

Puranjay Mohan June 5, 2023, 7:40 a.m. UTC
Use bpf_jit_binary_pack_alloc() for memory management of JIT binaries in
the ARM64 BPF JIT. bpf_jit_binary_pack_alloc() creates a pair of RW and
ROX buffers. The JIT writes the program into the RW buffer. When the JIT
is done, the program is copied to the final ROX buffer with
bpf_jit_binary_pack_finalize().

Implement bpf_arch_text_copy() and bpf_arch_text_invalidate() for the
ARM64 JIT, as these functions are required by the bpf_jit_binary_pack
allocator.

Signed-off-by: Puranjay Mohan <puranjay12@gmail.com>
---
 arch/arm64/net/bpf_jit_comp.c | 119 +++++++++++++++++++++++++++++-----
 1 file changed, 102 insertions(+), 17 deletions(-)
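
For context, the flow described above maps onto the prog pack API roughly
as follows. This is a minimal sketch modeled on the calls made in the
patch below; emit_insns() is a hypothetical stand-in for the JIT passes,
and error handling is elided:

	struct bpf_binary_header *header, *rw_header;
	u8 *image_ptr, *rw_image_ptr;

	/* Allocate the ROX region and its RW shadow in one call. */
	header = bpf_jit_binary_pack_alloc(image_size, &image_ptr, sizeof(u32),
					   &rw_header, &rw_image_ptr,
					   jit_fill_hole);
	if (!header)
		return NULL;

	/* The JIT writes instructions only through the RW shadow. */
	emit_insns((__le32 *)rw_image_ptr);

	/*
	 * Copy the program from the RW shadow into the ROX region and free
	 * the shadow; internally this uses bpf_arch_text_copy().
	 */
	bpf_jit_binary_pack_finalize(prog, header, rw_header);

	/* The program runs from the ROX address. */
	prog->bpf_func = (void *)image_ptr;

bpf_arch_text_invalidate() is the other arch hook: the pack allocator uses
it to fill a freed region with invalid instructions when a program is
returned to the shared pack.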

Comments

Song Liu June 5, 2023, 5:05 p.m. UTC | #1
On Mon, Jun 5, 2023 at 12:40 AM Puranjay Mohan <puranjay12@gmail.com> wrote:
>
> [...]
>
> diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
> index 145b540ec34f..ee9414cadea8 100644
> --- a/arch/arm64/net/bpf_jit_comp.c
> +++ b/arch/arm64/net/bpf_jit_comp.c
> @@ -76,6 +76,7 @@ struct jit_ctx {
>         int *offset;
>         int exentry_idx;
>         __le32 *image;
> +       __le32 *ro_image;

We are using:
image vs. ro_image
rw_header vs. header
rw_image_ptr vs. image_ptr

Shall we be more consistent with rw_ or ro_ prefix?

>         u32 stack_size;
>         int fpb_offset;
>  };
> @@ -205,6 +206,20 @@ static void jit_fill_hole(void *area, unsigned int size)
>                 *ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
>  }
>
> +int bpf_arch_text_invalidate(void *dst, size_t len)
> +{
> +       __le32 *ptr;
> +       int ret;
> +
> +       for (ptr = dst; len >= sizeof(u32); len -= sizeof(u32)) {
> +               ret = aarch64_insn_patch_text_nosync(ptr++, AARCH64_BREAK_FAULT);

I think one aarch64_insn_patch_text_nosync() per 4 bytes is too much overhead.
Shall we add a helper to do this in bigger patches?

Thanks,
Song

[...]
Puranjay Mohan June 5, 2023, 6:33 p.m. UTC | #2
Hi,

On Mon, Jun 5, 2023 at 7:05 PM Song Liu <song@kernel.org> wrote:
>
> On Mon, Jun 5, 2023 at 12:40 AM Puranjay Mohan <puranjay12@gmail.com> wrote:
> >
> > [...]
> >
> > @@ -76,6 +76,7 @@ struct jit_ctx {
> >         int *offset;
> >         int exentry_idx;
> >         __le32 *image;
> > +       __le32 *ro_image;
>
> We are using:
> image vs. ro_image
> rw_header vs. header
> rw_image_ptr vs. image_ptr

Will use "rw_image" and "image" in the next version.

>
> Shall we be more consistent with rw_ or ro_ prefix?
>
> > [...]
>
> I think one aarch64_insn_patch_text_nosync() per 4 bytes is too much overhead.
> Shall we add a helper to do this in bigger patches?

What would be the most efficient way to build this helper, as arm64
doesn't have the __text_poke() API? Calling copy_to_kernel_nofault() in a
loop might not be the best way. One way would be to use
__put_kernel_nofault() directly.

Also, what should we call this helper? aarch64_insn_memset()?

Thanks,
Puranjay
Song Liu June 5, 2023, 8:13 p.m. UTC | #3
On Mon, Jun 5, 2023 at 11:34 AM Puranjay Mohan <puranjay12@gmail.com> wrote:
>
> [...]
>
> What would be the most efficient way to build this helper, as arm64
> doesn't have the __text_poke() API? Calling copy_to_kernel_nofault() in
> a loop might not be the best way. One way would be to use
> __put_kernel_nofault() directly.
>
> Also, what should we call this helper? aarch64_insn_memset()?

I just found aarch64_insn_patch_text_cb() also calls
aarch64_insn_patch_text_nosync() in a loop. So it is probably OK as-is?

Thanks,
Song
Puranjay Mohan June 5, 2023, 8:19 p.m. UTC | #4
On Mon, Jun 5, 2023 at 10:13 PM Song Liu <song@kernel.org> wrote:
>
> > [...]
> >
> > What would be the most efficient way to build this helper, as arm64
> > doesn't have the __text_poke() API? Calling copy_to_kernel_nofault()
> > in a loop might not be the best way. One way would be to use
> > __put_kernel_nofault() directly.
> >
> > Also, what should we call this helper? aarch64_insn_memset()?
>
> I just found aarch64_insn_patch_text_cb() also calls
> aarch64_insn_patch_text_nosync() in a loop. So it is probably OK as-is?

Okay, then we can go ahead with this.

Another thing about the consistency of the rw_ and ro_ prefixes:
ctx->image is used all over the JIT, so renaming it would require a lot
of changes. Therefore, the naming convention I will follow is "image"
and "ro_image". With this convention, ctx->image can be left untouched
and only ro_image is needed in a few places, like:
-       prog->bpf_func = (void *)ctx.image;
+       prog->bpf_func = (void *)ctx.ro_image;
etc.

I will use this in the next version of the patch.

Thanks,
Puranjay
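
As an aside, the bulk-invalidate helper floated in the thread could look
roughly like the sketch below. This is only a sketch: the name
aarch64_insn_memset() comes from the discussion above, and it assumes the
helper lives in arch/arm64/kernel/patching.c so it can reuse the
file-local patch_map()/patch_unmap() fixmap helpers and patch_lock:

	static int aarch64_insn_memset(void *dst, u32 insn, size_t len)
	{
		unsigned long flags;
		size_t patched = 0;
		size_t size;
		void *waddr;
		void *ptr;

		if (!IS_ALIGNED((uintptr_t)dst, AARCH64_INSN_SIZE) ||
		    !IS_ALIGNED(len, AARCH64_INSN_SIZE))
			return -EINVAL;

		while (patched < len) {
			ptr = dst + patched;
			size = min_t(size_t, len - patched,
				     PAGE_SIZE - offset_in_page(ptr));

			/* Map each page once and fill it word by word. */
			raw_spin_lock_irqsave(&patch_lock, flags);
			waddr = patch_map(ptr, FIX_TEXT_POKE0);
			memset32(waddr, insn, size / AARCH64_INSN_SIZE);
			patch_unmap(FIX_TEXT_POKE0);
			raw_spin_unlock_irqrestore(&patch_lock, flags);

			patched += size;
		}

		/* One cache maintenance pass for the whole range. */
		caches_clean_inval_pou((uintptr_t)dst, (uintptr_t)dst + len);

		return 0;
	}

With such a helper, bpf_arch_text_invalidate() would become a single
aarch64_insn_memset(dst, AARCH64_BREAK_FAULT, len) call (with len rounded
down to a multiple of AARCH64_INSN_SIZE) instead of one
aarch64_insn_patch_text_nosync() per instruction.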

Patch

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 145b540ec34f..ee9414cadea8 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -76,6 +76,7 @@  struct jit_ctx {
 	int *offset;
 	int exentry_idx;
 	__le32 *image;
+	__le32 *ro_image;
 	u32 stack_size;
 	int fpb_offset;
 };
@@ -205,6 +206,20 @@  static void jit_fill_hole(void *area, unsigned int size)
 		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
 }
 
+int bpf_arch_text_invalidate(void *dst, size_t len)
+{
+	__le32 *ptr;
+	int ret;
+
+	for (ptr = dst; len >= sizeof(u32); len -= sizeof(u32)) {
+		ret = aarch64_insn_patch_text_nosync(ptr++, AARCH64_BREAK_FAULT);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static inline int epilogue_offset(const struct jit_ctx *ctx)
 {
 	int to = ctx->epilogue_offset;
@@ -701,7 +716,8 @@  static int add_exception_handler(const struct bpf_insn *insn,
 				 struct jit_ctx *ctx,
 				 int dst_reg)
 {
-	off_t offset;
+	off_t ins_offset;
+	off_t fixup_offset;
 	unsigned long pc;
 	struct exception_table_entry *ex;
 
@@ -717,12 +733,11 @@  static int add_exception_handler(const struct bpf_insn *insn,
 		return -EINVAL;
 
 	ex = &ctx->prog->aux->extable[ctx->exentry_idx];
-	pc = (unsigned long)&ctx->image[ctx->idx - 1];
+	pc = (unsigned long)&ctx->ro_image[ctx->idx - 1];
 
-	offset = pc - (long)&ex->insn;
-	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
+	ins_offset = pc - (long)&ex->insn;
+	if (WARN_ON_ONCE(ins_offset >= 0 || ins_offset < INT_MIN))
 		return -ERANGE;
-	ex->insn = offset;
 
 	/*
 	 * Since the extable follows the program, the fixup offset is always
@@ -732,11 +747,20 @@  static int add_exception_handler(const struct bpf_insn *insn,
 	 * modifying the upper bits because the table is already sorted, and
 	 * isn't part of the main exception table.
 	 */
-	offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
-	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
+	fixup_offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
+	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, fixup_offset))
 		return -ERANGE;
 
-	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
+	/*
+	 * The offsets above have been calculated using the RO+X buffer but we
+	 * need to use the R/W buffer for writes.
+	 * switch ex to rw buffer for writing.
+	 */
+	ex = (void *)ctx->image + ((void *)ex - (void *)ctx->ro_image);
+
+	ex->insn = ins_offset;
+
+	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
 		    FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
 
 	ex->type = EX_TYPE_BPF;
@@ -1446,6 +1470,7 @@  static inline void bpf_flush_icache(void *start, void *end)
 
 struct arm64_jit_data {
 	struct bpf_binary_header *header;
+	struct bpf_binary_header *rw_header;
 	u8 *image;
 	struct jit_ctx ctx;
 };
@@ -1454,6 +1479,7 @@  struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	int image_size, prog_size, extable_size, extable_align, extable_offset;
 	struct bpf_prog *tmp, *orig_prog = prog;
+	struct bpf_binary_header *rw_header;
 	struct bpf_binary_header *header;
 	struct arm64_jit_data *jit_data;
 	bool was_classic = bpf_prog_was_classic(prog);
@@ -1461,6 +1487,7 @@  struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	bool extra_pass = false;
 	struct jit_ctx ctx;
 	u8 *image_ptr;
+	u8 *rw_image_ptr;
 
 	if (!prog->jit_requested)
 		return orig_prog;
@@ -1489,6 +1516,9 @@  struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		ctx = jit_data->ctx;
 		image_ptr = jit_data->image;
 		header = jit_data->header;
+		rw_header = jit_data->rw_header;
+		rw_image_ptr = (void *)rw_header + ((void *)image_ptr
+						 - (void *)header);
 		extra_pass = true;
 		prog_size = sizeof(u32) * ctx.idx;
 		goto skip_init_ctx;
@@ -1533,8 +1563,9 @@  struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	/* also allocate space for plt target */
 	extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
 	image_size = extable_offset + extable_size;
-	header = bpf_jit_binary_alloc(image_size, &image_ptr,
-				      sizeof(u32), jit_fill_hole);
+	header = bpf_jit_binary_pack_alloc(image_size, &image_ptr, sizeof(u32),
+					   &rw_header, &rw_image_ptr,
+					   jit_fill_hole);
 	if (header == NULL) {
 		prog = orig_prog;
 		goto out_off;
@@ -1542,19 +1573,24 @@  struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	/* 2. Now, the actual pass. */
 
-	ctx.image = (__le32 *)image_ptr;
 	if (extable_size)
 		prog->aux->extable = (void *)image_ptr + extable_offset;
 skip_init_ctx:
+	/*
+	 * Use the rw_image_ptr for writing the JITed instructions.
+	 * Save the read only image_ptr in ctx because it will be used to
+	 * calculate offsets for filling out the exception table later.
+	 */
+	ctx.image = (__le32 *)rw_image_ptr;
+	ctx.ro_image = (__le32 *)image_ptr;
 	ctx.idx = 0;
 	ctx.exentry_idx = 0;
 
 	build_prologue(&ctx, was_classic);
 
 	if (build_body(&ctx, extra_pass)) {
-		bpf_jit_binary_free(header);
 		prog = orig_prog;
-		goto out_off;
+		goto out_free_hdr;
 	}
 
 	build_epilogue(&ctx);
@@ -1562,32 +1598,42 @@  struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	/* 3. Extra pass to validate JITed code. */
 	if (validate_ctx(&ctx)) {
-		bpf_jit_binary_free(header);
 		prog = orig_prog;
-		goto out_off;
+		goto out_free_hdr;
 	}
 
 	/* And we're done. */
 	if (bpf_jit_enable > 1)
 		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
 
+	/*
+	 * As the JITed instructions have been written to the R/W buffer, we can
+	 * move ctx.image back to the RO+X buffer from where the BPF program
+	 * will run. bpf_jit_binary_pack_finalize() will copy the instructions
+	 * from the R/W buffer to the RO+X buffer.
+	 */
+	ctx.image = (__le32 *)image_ptr;
 	bpf_flush_icache(header, ctx.image + ctx.idx);
 
 	if (!prog->is_func || extra_pass) {
 		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
 			pr_err_once("multi-func JIT bug %d != %d\n",
 				    ctx.idx, jit_data->ctx.idx);
-			bpf_jit_binary_free(header);
 			prog->bpf_func = NULL;
 			prog->jited = 0;
 			prog->jited_len = 0;
+			goto out_free_hdr;
+		}
+		if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header,
+							 rw_header))) {
+			header = NULL;
 			goto out_off;
 		}
-		bpf_jit_binary_lock_ro(header);
 	} else {
 		jit_data->ctx = ctx;
 		jit_data->image = image_ptr;
 		jit_data->header = header;
+		jit_data->rw_header = rw_header;
 	}
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
@@ -1610,6 +1656,14 @@  struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
 					   tmp : orig_prog);
 	return prog;
+
+out_free_hdr:
+	if (header) {
+		bpf_arch_text_copy(&header->size, &rw_header->size,
+				   sizeof(rw_header->size));
+		bpf_jit_binary_pack_free(header, rw_header);
+	}
+	goto out_off;
 }
 
 bool bpf_jit_supports_kfunc_call(void)
@@ -1617,6 +1671,13 @@  bool bpf_jit_supports_kfunc_call(void)
 	return true;
 }
 
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+	if (aarch64_insn_copy(dst, src, len) == NULL)
+		return ERR_PTR(-EINVAL);
+	return dst;
+}
+
 u64 bpf_jit_alloc_exec_limit(void)
 {
 	return VMALLOC_END - VMALLOC_START;
@@ -2221,3 +2282,27 @@  int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 
 	return ret;
 }
+
+void bpf_jit_free(struct bpf_prog *prog)
+{
+	if (prog->jited) {
+		struct arm64_jit_data *jit_data = prog->aux->jit_data;
+		struct bpf_binary_header *hdr;
+
+		/*
+		 * If we fail the final pass of JIT (from jit_subprogs),
+		 * the program may not be finalized yet. Call finalize here
+		 * before freeing it.
+		 */
+		if (jit_data) {
+			bpf_jit_binary_pack_finalize(prog, jit_data->header,
+						     jit_data->rw_header);
+			kfree(jit_data);
+		}
+		hdr = bpf_jit_binary_pack_hdr(prog);
+		bpf_jit_binary_pack_free(hdr, NULL);
+		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
+	}
+
+	bpf_prog_unlock_free(prog);
+}
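
One pattern worth calling out: the patch relies twice on the RW and ROX
buffers being byte-for-byte mirrors, rebasing a pointer from one buffer
into the other by preserving its byte offset (once for the exception
table entry, once to recover rw_image_ptr in the extra pass). A generic
illustration of the idiom (the helper name is illustrative, not from the
patch):

	/* Rebase a pointer from one mirrored buffer into the other. */
	static inline void *rebase_ptr(void *ptr, void *from_base, void *to_base)
	{
		return to_base + (ptr - from_base);
	}

	/* e.g. ex = rebase_ptr(ex, ctx->ro_image, ctx->image); */

This is also why add_exception_handler() computes ins_offset and
fixup_offset against the ROX addresses first and only then switches ex to
the RW buffer for the actual stores.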