
[v2,08/14] arm64: mask PAC bits of __builtin_return_address

Message ID 1574166746-27197-9-git-send-email-amit.kachhap@arm.com (mailing list archive)
State New, archived
Series arm64: return address signing

Commit Message

Amit Daniel Kachhap Nov. 19, 2019, 12:32 p.m. UTC
This patch redefines __builtin_return_address to mask the PAC bits
when Pointer Authentication is enabled. As __builtin_return_address
is mostly used to refer to the symbol address of the calling function,
masking out the runtime-generated PAC bits lets the symbol lookup find
a match.

This change fixes utilities such as cat /proc/vmallocinfo so that they
now show the correct caller symbols.

Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
---
Change since last version:
 * Comment modified.

 arch/arm64/Kconfig                |  1 +
 arch/arm64/include/asm/compiler.h | 17 +++++++++++++++++
 2 files changed, 18 insertions(+)
 create mode 100644 arch/arm64/include/asm/compiler.h

Comments

Ard Biesheuvel Nov. 21, 2019, 5:42 p.m. UTC | #1
On Tue, 19 Nov 2019 at 13:33, Amit Daniel Kachhap <amit.kachhap@arm.com> wrote:
>
> [...]
>
> +/* As TBI1 is currently disabled, bits 63:56 also contain PAC bits */
> +#define __builtin_return_address(val)                          \
> +       (void *)((unsigned long)__builtin_return_address(val) | \
> +       (GENMASK_ULL(63, 56) | GENMASK_ULL(54, VA_BITS)))

It seems to me like we are accumulating a lot of cruft for khwasan as
well as PAC to convert addresses into their untagged format.

Are there any untagging helpers we can already reuse? If not, can we
introduce something that can be shared between all these use cases?
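
For context, the kernel already has a helper in this family: arm64's
untagged_addr() strips a TBI tag from a data pointer by sign-extending
from bit 55. A minimal sketch of that pattern for illustration (the
function name below is made up, not the kernel's):

#include <linux/bitops.h>	/* sign_extend64() */

/*
 * Sketch of the TBI-style untagging pattern: sign-extending from bit 55
 * clears the tag bits 63:56 of a user pointer and restores the all-ones
 * upper bits of a kernel pointer.
 */
static inline unsigned long untag_pointer(unsigned long addr)
{
	return (unsigned long)sign_extend64(addr, 55);
}
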
Richard Henderson Nov. 22, 2019, 8:48 a.m. UTC | #2
On 11/21/19 5:42 PM, Ard Biesheuvel wrote:
> On Tue, 19 Nov 2019 at 13:33, Amit Daniel Kachhap <amit.kachhap@arm.com> wrote:
>>
>> [...]
>>
>> +/* As TBI1 is currently disabled, bits 63:56 also contain PAC bits */
>> +#define __builtin_return_address(val)                          \
>> +       (void *)((unsigned long)__builtin_return_address(val) | \
>> +       (GENMASK_ULL(63, 56) | GENMASK_ULL(54, VA_BITS)))
> 
> > It seems to me like we are accumulating a lot of cruft for khwasan as
> > well as PAC to convert addresses into their untagged format.
> >
> > Are there any untagging helpers we can already reuse? If not, can we
> > introduce something that can be shared between all these use cases?

xpaci will strip the PAC from an instruction pointer, but requires the
instruction set to be enabled, so you'd have to fiddle with alternatives.  You
*could* force the use of lr as input/output and use xpaclri, which is a NOP if
the instruction set is not enabled.

Also, this definition is not correct, because bit 55 needs to be propagated
to all of the bits being masked out here, so that you get a large negative
number for kernel-space addresses.


r~
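
For illustration, a minimal sketch of the xpaclri approach described
above. This is hypothetical code, not part of this series. XPACLRI
reads and writes LR (x30) and sits in the HINT space (hint #7), so
older assemblers accept it and cores without pointer authentication
execute it as a NOP, which is why no alternatives are needed:

/* Hypothetical sketch: strip the PAC without using alternatives. */
static inline unsigned long xpaclri_strip_pac(unsigned long addr)
{
	/* XPACLRI takes its input in LR, so force the value into x30. */
	register unsigned long lr asm("x30") = addr;

	asm("hint #7" /* xpaclri */ : "+r" (lr));

	return lr;
}
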
Ard Biesheuvel Nov. 22, 2019, 1:27 p.m. UTC | #3
On Fri, 22 Nov 2019 at 09:48, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> [...]
>
> xpaci will strip the PAC from an instruction pointer, but requires the
> instruction set to be enabled, so you'd have to fiddle with alternatives.  You
> *could* force the use of lr as input/output and use xpaclri, which is a NOP if
> the instruction set is not enabled.
>
> Also, this definition is not correct, because bit 55 needs to be propagated
> to all of the bits being masked out here, so that you get a large negative
> number for kernel-space addresses.
>

Indeed. Even though bit 55 is generally guaranteed to be set, it would
be better to simply reuse ptrauth_strip_insn_pac() that you introduce
in the next patch.

Also, please use __ASM_COMPILER_H as the header guard (which is more
idiomatic), and drop the unnecessary 'ifndef __ASSEMBLY__'.

Finally, could you add a comment that this header is transitively
included (via include/linux/compiler_types.h) on the compiler command
line, so it is guaranteed to be seen by all users of this macro, and
there is no risk of the wrong version being used.
Amit Daniel Kachhap Nov. 25, 2019, 5:42 a.m. UTC | #4
Hi Ard,

On 11/21/19 11:12 PM, Ard Biesheuvel wrote:
> [...]
> 
> It seems to me like we are accumulating a lot of cruft for khwasan as
> well as PAC to convert addresses into their untagged format.

ok

> 
> Are there any untagging helpers we can already reuse? If not, can we
> introduce something that can be shared between all these use cases?

I tried to include <asm/pointer_auth.h> here, but it produced a lot of
header inclusion errors, as include/linux/compiler_types.h (which
includes this header) is very sensitive to what it pulls in.
I will check whether some kind of shared header can be added, or at
least write a proper commit log.

Regards,
Amit D
Amit Daniel Kachhap Nov. 25, 2019, 9:12 a.m. UTC | #5
Hi,

On 11/22/19 2:18 PM, Richard Henderson wrote:
> [...]
>
> xpaci will strip the PAC from an instruction pointer, but requires the
> instruction set to be enabled, so you'd have to fiddle with alternatives.  You
> *could* force the use of lr as input/output and use xpaclri, which is a NOP if
> the instruction set is not enabled.

The xpaclri instruction seems the easier option to implement here, as
including any header such as "alternative.h" creates a lot of header
inclusion errors. Thanks for the suggestion.

> 
> Also, this definition is not correct, because bit 55 needs to be propagated
> to all of the bits being masked out here, so that you get a large negative
> number for kernel-space addresses.

Yes, agreed.

Regards,
Amit D
Amit Daniel Kachhap Nov. 25, 2019, 9:18 a.m. UTC | #6
Hi,

On 11/22/19 6:57 PM, Ard Biesheuvel wrote:
> [...]
>
> Indeed. Even though bit 55 is generally guaranteed to be set, it would
> be better to simply reuse ptrauth_strip_insn_pac() that you introduce
> in the next patch.
Earlier I tried reusing it, but that produced a lot of header inclusion
errors. I will check whether that can be fixed.
> 
> Also, please use __ASM_COMPILER_H as the header guard (which is more
> idiomatic), and drop the unnecessary 'ifndef __ASSEMBLY__'.

Yes, sure.
> 
> Finally, could you add a comment that this header is transitively
> included (via include/linux/compiler_types.h) on the compiler command
> line, so it is guaranteed to be seen by all users of this macro, and
> there is no risk of the wrong version being used.

Yes, sure.


Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 998248e..c1844de 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -117,6 +117,7 @@ config ARM64
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
+	select HAVE_ARCH_COMPILER_H
 	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
new file mode 100644
index 0000000..5efe310
--- /dev/null
+++ b/arch/arm64/include/asm/compiler.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_COMPILER_H
+#define __ASM_ARM_COMPILER_H
+
+#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_ARM64_PTR_AUTH)
+
+/* As TBI1 is currently disabled, bits 63:56 also contain PAC bits */
+#define __builtin_return_address(val)				\
+	(void *)((unsigned long)__builtin_return_address(val) |	\
+	(GENMASK_ULL(63, 56) | GENMASK_ULL(54, VA_BITS)))
+#endif
+
+#endif
+
+#endif /* __ASM_ARM_COMPILER_H */
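
Putting the review feedback together, a hypothetical revision of this
header might look like the sketch below: the __ASM_COMPILER_H guard and
dropped __ASSEMBLY__ check per Ard, bit 55 propagated into the masked
bits per Richard, and a comment on the transitive inclusion. The
__pac_mask and __strip_pac names are illustrative only, and GENMASK_ULL,
BIT_ULL and VA_BITS are assumed visible here, as in the original patch.

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_COMPILER_H
#define __ASM_COMPILER_H

#if defined(CONFIG_ARM64_PTR_AUTH)

/*
 * This header is transitively included via include/linux/compiler_types.h
 * on the compiler command line, so every user of __builtin_return_address
 * is guaranteed to see this definition.
 */
#define __pac_mask	(GENMASK_ULL(63, 56) | GENMASK_ULL(54, VA_BITS))

/*
 * Propagate bit 55 into the masked bits: kernel return addresses become
 * large negative numbers again, while user addresses have the PAC cleared.
 */
#define __strip_pac(ptr)					\
	(((ptr) & BIT_ULL(55)) ? ((ptr) | __pac_mask)		\
			       : ((ptr) & ~__pac_mask))

#define __builtin_return_address(val)				\
	((void *)__strip_pac((unsigned long)__builtin_return_address(val)))

#endif /* CONFIG_ARM64_PTR_AUTH */

#endif /* __ASM_COMPILER_H */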