[v2] libbpf: define bpf_tail_call_static when __clang__ is not defined

Message ID 20220912063514.2824432-1-james.hilliard1@gmail.com (mailing list archive)
State Rejected
Delegated to: BPF
Series [v2] libbpf: define bpf_tail_call_static when __clang__ is not defined

Checks

Context Check Description
netdev/tree_selection success Not a local patch
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-4 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-5 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-6 success Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_verifier on x86_64 with llvm-16

Commit Message

James Hilliard Sept. 12, 2022, 6:35 a.m. UTC
The bpf_tail_call_static function is currently not defined unless
using clang >= 8.

To support bpf_tail_call_static when building with GCC/GAS, we can
enable it whenever __clang__ is not defined.

We also need a GAS assembly syntax check in the inline assembly so
that the assembler is provided GAS-compatible assembly.

Checking .gasversion. also provides a migration path to the llvm
syntax once GAS supports it natively.

Signed-off-by: James Hilliard <james.hilliard1@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
Changes v1 -> v2:
  - use gasversion to detect assembly variant
---
 tools/lib/bpf/bpf_helpers.h | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)
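
For reference, a minimal usage sketch (not part of the patch; the map and
program names are illustrative): an XDP program doing a constant-slot tail
call through bpf_tail_call_static(), which this change makes available when
building with GCC/GAS as well as clang.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Program array holding the tail-call targets; slot 0 is populated
 * from user space via bpf_map_update_elem().
 */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int xdp_callee(struct xdp_md *ctx)
{
	return XDP_PASS;
}

SEC("xdp")
int xdp_entry(struct xdp_md *ctx)
{
	/* The slot must be a compile-time constant; a non-constant
	 * value trips __bpf_unreachable() and fails the build.
	 */
	bpf_tail_call_static(ctx, &jmp_table, 0);

	/* Only reached if the tail call fails (e.g. empty slot). */
	return XDP_DROP;
}

char _license[] SEC("license") = "GPL";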

Comments

Yonghong Song Sept. 12, 2022, 5:29 p.m. UTC | #1
On 9/12/22 7:35 AM, James Hilliard wrote:
> The bpf_tail_call_static function is currently not defined unless
> using clang >= 8.
> 
> To support bpf_tail_call_static when building with GCC/GAS, we can
> enable it whenever __clang__ is not defined.
>
> We also need a GAS assembly syntax check in the inline assembly so
> that the assembler is provided GAS-compatible assembly.
>
> Checking .gasversion. also provides a migration path to the llvm
> syntax once GAS supports it natively.

I didn't see a gasversion comparison in the asm code.
Is it possible to compare gasversion against a known
gas version which supports the new syntax? If the
gasversion is supported, use the same syntax as llvm.
If it is not, output an illegal insn, and it would be
even better if some error information were printed out
on the screen.

> 
> Signed-off-by: James Hilliard <james.hilliard1@gmail.com>
> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
> ---
> Changes v1 -> v2:
>    - use gasversion to detect assembly variant
> ---
>   tools/lib/bpf/bpf_helpers.h | 18 ++++++++++++------
>   1 file changed, 12 insertions(+), 6 deletions(-)
> 
> diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
> index 7349b16b8e2f..5b98f5506798 100644
> --- a/tools/lib/bpf/bpf_helpers.h
> +++ b/tools/lib/bpf/bpf_helpers.h
> @@ -131,7 +131,7 @@
>   /*
>    * Helper function to perform a tail call with a constant/immediate map slot.
>    */
> -#if __clang_major__ >= 8 && defined(__bpf__)
> +#if (!defined(__clang__) || __clang_major__ >= 8) && defined(__bpf__)
>   static __always_inline void
>   bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
>   {
> @@ -139,8 +139,8 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
>   		__bpf_unreachable();
>   
>   	/*
> -	 * Provide a hard guarantee that LLVM won't optimize setting r2 (map
> -	 * pointer) and r3 (constant map index) from _different paths_ ending
> +	 * Provide a hard guarantee that the compiler won't optimize setting r2
> +	 * (map pointer) and r3 (constant map index) from _different paths_ ending
>   	 * up at the _same_ call insn as otherwise we won't be able to use the
>   	 * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
>   	 * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
> @@ -148,12 +148,18 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
>   	 *
>   	 * Note on clobber list: we need to stay in-line with BPF calling
>   	 * convention, so even if we don't end up using r0, r4, r5, we need
> -	 * to mark them as clobber so that LLVM doesn't end up using them
> -	 * before / after the call.
> +	 * to mark them as clobber so that the compiler doesn't end up using
> +	 * them before / after the call.
>   	 */
> -	asm volatile("r1 = %[ctx]\n\t"
> +	asm volatile(".ifdef .gasversion.\n\t"
> +		     "mov %%r1,%[ctx]\n\t"
> +		     "mov %%r2,%[map]\n\t"
> +		     "mov %%r3,%[slot]\n\t"
> +		     ".else\n\t"
> +		     "r1 = %[ctx]\n\t"
>   		     "r2 = %[map]\n\t"
>   		     "r3 = %[slot]\n\t"
> +		     ".endif\n\t"
>   		     "call 12"
>   		     :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
>   		     : "r0", "r1", "r2", "r3", "r4", "r5");
James Hilliard Sept. 12, 2022, 9:32 p.m. UTC | #2
On Mon, Sep 12, 2022 at 11:29 AM Yonghong Song <yhs@fb.com> wrote:
>
>
>
> On 9/12/22 7:35 AM, James Hilliard wrote:
> > The bpf_tail_call_static function is currently not defined unless
> > using clang >= 8.
> >
> > To support bpf_tail_call_static when building with GCC/GAS, we can
> > enable it whenever __clang__ is not defined.
> >
> > We also need a GAS assembly syntax check in the inline assembly so
> > that the assembler is provided GAS-compatible assembly.
> >
> > Checking .gasversion. also provides a migration path to the llvm
> > syntax once GAS supports it natively.
>
> I didn't see a gasversion comparison in the asm code.
> Is it possible to compare gasversion against a known
> gas version which supports the new syntax? If the
> gasversion is supported, use the same syntax as llvm.
> If it is not, output an illegal insn, and it would be
> even better if some error information were printed out
> on the screen.

Yeah, once llvm syntax is supported in GAS, the check would simply
need to be changed to something like:
.if .gasversion. < 24000

At least this seems to me to be the best way to provide a migration
path, as we can't really check assembler versions from the compiler
like we can with llvm.
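
A rough sketch of what that migration could look like, building on the asm
in this patch (untested; the 24000 cutoff is just a placeholder for
whichever future GAS release gains the llvm syntax): keep the .ifdef so
clang's integrated assembler, which never defines .gasversion., skips the
whole GAS block, and nest the version comparison inside the GAS-only branch.

	asm volatile(".ifdef .gasversion.\n\t"
		     ".if .gasversion. < 24000\n\t"
		     /* older GAS only understands the mov syntax */
		     "mov %%r1,%[ctx]\n\t"
		     "mov %%r2,%[map]\n\t"
		     "mov %%r3,%[slot]\n\t"
		     ".else\n\t"
		     /* newer GAS accepts the llvm syntax */
		     "r1 = %[ctx]\n\t"
		     "r2 = %[map]\n\t"
		     "r3 = %[slot]\n\t"
		     ".endif\n\t"
		     ".else\n\t"
		     /* clang integrated assembler */
		     "r1 = %[ctx]\n\t"
		     "r2 = %[map]\n\t"
		     "r3 = %[slot]\n\t"
		     ".endif\n\t"
		     "call 12"
		     :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
		     : "r0", "r1", "r2", "r3", "r4", "r5");

Under clang the outer .ifdef is false, so the nested version check is never
evaluated and the current behaviour is preserved.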

>
> >
> > Signed-off-by: James Hilliard <james.hilliard1@gmail.com>
> > Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
> > ---
> > Changes v1 -> v2:
> >    - use gasversion to detect assembly variant
> > ---
> >   tools/lib/bpf/bpf_helpers.h | 18 ++++++++++++------
> >   1 file changed, 12 insertions(+), 6 deletions(-)
> >
> > diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
> > index 7349b16b8e2f..5b98f5506798 100644
> > --- a/tools/lib/bpf/bpf_helpers.h
> > +++ b/tools/lib/bpf/bpf_helpers.h
> > @@ -131,7 +131,7 @@
> >   /*
> >    * Helper function to perform a tail call with a constant/immediate map slot.
> >    */
> > -#if __clang_major__ >= 8 && defined(__bpf__)
> > +#if (!defined(__clang__) || __clang_major__ >= 8) && defined(__bpf__)
> >   static __always_inline void
> >   bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
> >   {
> > @@ -139,8 +139,8 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
> >               __bpf_unreachable();
> >
> >       /*
> > -      * Provide a hard guarantee that LLVM won't optimize setting r2 (map
> > -      * pointer) and r3 (constant map index) from _different paths_ ending
> > +      * Provide a hard guarantee that the compiler won't optimize setting r2
> > +      * (map pointer) and r3 (constant map index) from _different paths_ ending
> >        * up at the _same_ call insn as otherwise we won't be able to use the
> >        * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
> >        * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
> > @@ -148,12 +148,18 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
> >        *
> >        * Note on clobber list: we need to stay in-line with BPF calling
> >        * convention, so even if we don't end up using r0, r4, r5, we need
> > -      * to mark them as clobber so that LLVM doesn't end up using them
> > -      * before / after the call.
> > +      * to mark them as clobber so that the compiler doesn't end up using
> > +      * them before / after the call.
> >        */
> > -     asm volatile("r1 = %[ctx]\n\t"
> > +     asm volatile(".ifdef .gasversion.\n\t"
> > +                  "mov %%r1,%[ctx]\n\t"
> > +                  "mov %%r2,%[map]\n\t"
> > +                  "mov %%r3,%[slot]\n\t"
> > +                  ".else\n\t"
> > +                  "r1 = %[ctx]\n\t"
> >                    "r2 = %[map]\n\t"
> >                    "r3 = %[slot]\n\t"
> > +                  ".endif\n\t"
> >                    "call 12"
> >                    :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
> >                    : "r0", "r1", "r2", "r3", "r4", "r5");
Patch

diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 7349b16b8e2f..5b98f5506798 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -131,7 +131,7 @@ 
 /*
  * Helper function to perform a tail call with a constant/immediate map slot.
  */
-#if __clang_major__ >= 8 && defined(__bpf__)
+#if (!defined(__clang__) || __clang_major__ >= 8) && defined(__bpf__)
 static __always_inline void
 bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
 {
@@ -139,8 +139,8 @@  bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
 		__bpf_unreachable();
 
 	/*
-	 * Provide a hard guarantee that LLVM won't optimize setting r2 (map
-	 * pointer) and r3 (constant map index) from _different paths_ ending
+	 * Provide a hard guarantee that the compiler won't optimize setting r2
+	 * (map pointer) and r3 (constant map index) from _different paths_ ending
 	 * up at the _same_ call insn as otherwise we won't be able to use the
 	 * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
 	 * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
@@ -148,12 +148,18 @@  bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
 	 *
 	 * Note on clobber list: we need to stay in-line with BPF calling
 	 * convention, so even if we don't end up using r0, r4, r5, we need
-	 * to mark them as clobber so that LLVM doesn't end up using them
-	 * before / after the call.
+	 * to mark them as clobber so that the compiler doesn't end up using
+	 * them before / after the call.
 	 */
-	asm volatile("r1 = %[ctx]\n\t"
+	asm volatile(".ifdef .gasversion.\n\t"
+		     "mov %%r1,%[ctx]\n\t"
+		     "mov %%r2,%[map]\n\t"
+		     "mov %%r3,%[slot]\n\t"
+		     ".else\n\t"
+		     "r1 = %[ctx]\n\t"
 		     "r2 = %[map]\n\t"
 		     "r3 = %[slot]\n\t"
+		     ".endif\n\t"
 		     "call 12"
 		     :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
 		     : "r0", "r1", "r2", "r3", "r4", "r5");