
[v2] arm64: kasan: add interceptors for strcmp/strncmp functions

Message ID 1535014606-176525-1-git-send-email-kyeongdon.kim@lge.com (mailing list archive)
State New, archived
Series [v2] arm64: kasan: add interceptors for strcmp/strncmp functions

Commit Message

Kyeongdon Kim Aug. 23, 2018, 8:56 a.m. UTC
This patch declares strcmp/strncmp as weak symbols; they are among the
most frequently used string operations.

With the original functions declared weak, the strong definitions in
mm/kasan/kasan.c can replace them.

The assembly-optimized strcmp/strncmp functions cannot detect KASan bugs;
with this change, such bugs are reported as in the call trace below.
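For readers unfamiliar with the mechanism: the linker lets a strong definition
override a weak one of the same name, which is how the instrumented C functions
in mm/kasan/kasan.c can take the place of the weak assembly routines while the
__-prefixed names keep the original code reachable. A minimal sketch of the idea
in plain C (demo_cmp/__demo_cmp are made-up names for illustration, not kernel
symbols):

/* fast.c - optimized implementation; the public name is only a weak
 * alias, so another object file may override it, while the __-prefixed
 * name always refers to this code. */
int __demo_cmp(const char *a, const char *b)
{
        while (*a && *a == *b) {
                a++;
                b++;
        }
        return *(const unsigned char *)a - *(const unsigned char *)b;
}
int demo_cmp(const char *, const char *)
        __attribute__((weak, alias("__demo_cmp")));

/* checked.c - a strong definition of the public name; at link time it
 * replaces the weak alias, performs its checks, and then calls the
 * original code through the __-prefixed name. */
int __demo_cmp(const char *a, const char *b);

int demo_cmp(const char *a, const char *b)
{
        /* access checks would go here, as KASAN's check_memory_region does */
        return __demo_cmp(a, b);
}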

Comments

Kyeongdon Kim Sept. 3, 2018, 9:02 a.m. UTC | #1
Dear all,

Could anyone review this and suggest an appropriate approach?
I think str[n]cmp are frequently used functions, so I believe this would be
very useful with arm64 KASAN.

Best Regards,
Kyeongdon Kim


Dmitry Vyukov Sept. 3, 2018, 9:13 a.m. UTC | #2
On Mon, Sep 3, 2018 at 11:02 AM, Kyeongdon Kim <kyeongdon.kim@lge.com> wrote:
> Dear all,
>
> Could anyone review this and suggest an appropriate approach?
> I think str[n]cmp are frequently used functions, so I believe this would be
> very useful with arm64 KASAN.

Hi Kyeongdon,

Please add tests for this to lib/test_kasan.c.
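
For reference, a use-after-free test for str[n]cmp in the style of
lib/test_kasan.c might look roughly like the sketch below; the function name,
size, and strings are made up for illustration and are not taken from this
thread or from an existing test.

static noinline void __init kasan_strcmp_uaf(void)
{
        char *ptr;
        size_t size = 128;

        pr_info("use-after-free in strcmp/strncmp\n");
        ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
        if (!ptr)
                return;
        kfree(ptr);

        /* Both calls read from freed memory; once str[n]cmp are
         * instrumented, each should produce a KASAN report. */
        strcmp(ptr, "test");
        strncmp(ptr, "test", 4);
}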

Andrey Ryabinin Sept. 3, 2018, 9:40 a.m. UTC | #3
On 08/23/2018 11:56 AM, Kyeongdon Kim wrote:

> diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
> index c3bd520..61ad7f1 100644
> --- a/mm/kasan/kasan.c
> +++ b/mm/kasan/kasan.c
> @@ -304,6 +304,29 @@ void *memcpy(void *dest, const void *src, size_t len)
>  
>  	return __memcpy(dest, src, len);
>  }
> +#ifdef CONFIG_ARM64
> +/*
> + * Arch arm64 use assembly variant for strcmp/strncmp,
> + * xtensa use inline asm operations and x86_64 use c one,
> + * so now this interceptors only for arm64 kasan.
> + */
> +#undef strcmp
> +int strcmp(const char *cs, const char *ct)
> +{
> +	check_memory_region((unsigned long)cs, 1, false, _RET_IP_);
> +	check_memory_region((unsigned long)ct, 1, false, _RET_IP_);
> +

Well this is definitely wrong. strcmp() often accesses far more than one byte.
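
To illustrate with a hypothetical buffer (size and contents made up): checking
only the first byte of each argument lets an out-of-bounds read inside strcmp()
go unnoticed, because the uninstrumented assembly body does the actual walking.

        char *p = kmalloc(4, GFP_KERNEL);

        memcpy(p, "abcd", 4);   /* no NUL terminator inside the 4-byte object */
        /* strcmp() keeps comparing until it hits a mismatch or a NUL, so it
         * reads past the end of 'p' - but only p[0] was checked above, so
         * no report is produced. */
        strcmp(p, "abcdef");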

> +	return __strcmp(cs, ct);
> +}
> +#undef strncmp
> +int strncmp(const char *cs, const char *ct, size_t len)
> +{
> +	check_memory_region((unsigned long)cs, len, false, _RET_IP_);
> +	check_memory_region((unsigned long)ct, len, false, _RET_IP_);

This will cause false positives. Both 'cs', and 'ct' could be less than len bytes.
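
As a hypothetical example of such a false positive (buffer size and contents
made up): both arguments can be valid, NUL-terminated, and shorter than len,
yet checking len bytes of each pointer up front reports an out-of-bounds access
that strncmp() itself would never perform.

        char *p = kmalloc(8, GFP_KERNEL);

        strcpy(p, "abc");       /* 4 bytes used out of the 8 allocated */
        /* A perfectly valid call: strncmp() stops at the NUL in 'p' long
         * before reading 16 bytes, but checking 16 bytes of 'p' up front
         * flags a bogus out-of-bounds access. */
        strncmp(p, "abcdefgh", 16);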

There is no need in these interceptors, just use the C implementations from lib/string.c
like you did in your first patch.
The only thing that was wrong in the first patch is that assembly implementations
were compiled out instead of being declared weak.


> +
> +	return __strncmp(cs, ct, len);
> +}
> +#endif
>  
>  void kasan_alloc_pages(struct page *page, unsigned int order)
>  {
>
Kyeongdon Kim Sept. 4, 2018, 6:29 a.m. UTC | #4
Hello Dmitry,


On 2018-09-03 6:13 PM, Dmitry Vyukov wrote:
> On Mon, Sep 3, 2018 at 11:02 AM, Kyeongdon Kim <kyeongdon.kim@lge.com> 
> wrote:
> > Dear all,
> >
> > Could anyone review this and suggest an appropriate approach?
> > I think str[n]cmp are frequently used functions, so I believe this would
> > be very useful with arm64 KASAN.
>
> Hi Kyeongdon,
>
> Please add tests for this to lib/test_kasan.c.
>
I'll add tests for this patch after next version upload.

Thanks,
Kyeongdon Kim Sept. 4, 2018, 6:59 a.m. UTC | #5
Hello Andrey,

Thanks for your review.

On 2018-09-03 6:40 PM, Andrey Ryabinin wrote:
>
>
> On 08/23/2018 11:56 AM, Kyeongdon Kim wrote:
>
> > diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
> > index c3bd520..61ad7f1 100644
> > --- a/mm/kasan/kasan.c
> > +++ b/mm/kasan/kasan.c
> > @@ -304,6 +304,29 @@ void *memcpy(void *dest, const void *src, 
> size_t len)
> >
> > return __memcpy(dest, src, len);
> > }
> > +#ifdef CONFIG_ARM64
> > +/*
> > + * Arch arm64 use assembly variant for strcmp/strncmp,
> > + * xtensa use inline asm operations and x86_64 use c one,
> > + * so now this interceptors only for arm64 kasan.
> > + */
> > +#undef strcmp
> > +int strcmp(const char *cs, const char *ct)
> > +{
> > + check_memory_region((unsigned long)cs, 1, false, _RET_IP_);
> > + check_memory_region((unsigned long)ct, 1, false, _RET_IP_);
> > +
>
> Well this is definitely wrong. strcmp() often accesses far more than 
> one byte.
>
> > + return __strcmp(cs, ct);
> > +}
> > +#undef strncmp
> > +int strncmp(const char *cs, const char *ct, size_t len)
> > +{
> > + check_memory_region((unsigned long)cs, len, false, _RET_IP_);
> > + check_memory_region((unsigned long)ct, len, false, _RET_IP_);
>
> This will cause false positives. Both 'cs', and 'ct' could be less 
> than len bytes.
>
> There is no need in these interceptors, just use the C implementations 
> from lib/string.c
> like you did in your first patch.
> The only thing that was wrong in the first patch is that assembly 
> implementations
> were compiled out instead of being declared weak.
>
Well, at first I thought so..
I would remove diff code in /mm/kasan/kasan.c then use C implementations 
in lib/string.c
w/ assem implementations as weak :

diff --git a/lib/string.c b/lib/string.c
index 2c0900a..a18b18f 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -312,7 +312,7 @@ size_t strlcat(char *dest, const char *src, size_t count)
  EXPORT_SYMBOL(strlcat);
  #endif

-#ifndef __HAVE_ARCH_STRCMP
+#if (defined(CONFIG_ARM64) && defined(CONFIG_KASAN)) || !defined(__HAVE_ARCH_STRCMP)
  /**
   * strcmp - Compare two strings
   * @cs: One string
@@ -336,7 +336,7 @@ int strcmp(const char *cs, const char *ct)
  EXPORT_SYMBOL(strcmp);
  #endif

-#ifndef __HAVE_ARCH_STRNCMP
+#if (defined(CONFIG_ARM64) && defined(CONFIG_KASAN)) || !defined(__HAVE_ARCH_STRNCMP)
  /**
   * strncmp - Compare two length-limited strings

Can I get your opinion wrt this ?

Thanks,
Andrey Ryabinin Sept. 4, 2018, 10:10 a.m. UTC | #6
On 09/04/2018 09:59 AM, Kyeongdon Kim wrote:

>> > +#undef strncmp
>> > +int strncmp(const char *cs, const char *ct, size_t len)
>> > +{
>> > + check_memory_region((unsigned long)cs, len, false, _RET_IP_);
>> > + check_memory_region((unsigned long)ct, len, false, _RET_IP_);
>>
>> This will cause false positives. Both 'cs', and 'ct' could be less than len bytes.
>>
>> There is no need in these interceptors, just use the C implementations from lib/string.c
>> like you did in your first patch.
>> The only thing that was wrong in the first patch is that assembly implementations
>> were compiled out instead of being declared weak.
>>
> Well, at first I thought so..
> I would remove diff code in /mm/kasan/kasan.c then use C implementations in lib/string.c
> w/ assem implementations as weak :
> 
> diff --git a/lib/string.c b/lib/string.c
> index 2c0900a..a18b18f 100644
> --- a/lib/string.c
> +++ b/lib/string.c
> @@ -312,7 +312,7 @@ size_t strlcat(char *dest, const char *src, size_t count)
>  EXPORT_SYMBOL(strlcat);
>  #endif
> 
> -#ifndef __HAVE_ARCH_STRCMP
> +#if (defined(CONFIG_ARM64) && defined(CONFIG_KASAN)) || !defined(__HAVE_ARCH_STRCMP)

No. What part of "like you did in your first patch" is unclear to you?

>  /**
>   * strcmp - Compare two strings
>   * @cs: One string
> @@ -336,7 +336,7 @@ int strcmp(const char *cs, const char *ct)
>  EXPORT_SYMBOL(strcmp);
>  #endif
> 
> -#ifndef __HAVE_ARCH_STRNCMP
> +#if (defined(CONFIG_ARM64) && defined(CONFIG_KASAN)) || !defined(__HAVE_ARCH_STRNCMP)
>  /**
>   * strncmp - Compare two length-limited strings
> 
> Can I get your opinion wrt this ?
> 
> Thanks,
>
Andrey Ryabinin Sept. 4, 2018, 4:24 p.m. UTC | #7
On 09/04/2018 01:10 PM, Andrey Ryabinin wrote:
> 
> 
> On 09/04/2018 09:59 AM, Kyeongdon Kim wrote:
> 
>>>> +#undef strncmp
>>>> +int strncmp(const char *cs, const char *ct, size_t len)
>>>> +{
>>>> + check_memory_region((unsigned long)cs, len, false, _RET_IP_);
>>>> + check_memory_region((unsigned long)ct, len, false, _RET_IP_);
>>>
>>> This will cause false positives. Both 'cs', and 'ct' could be less than len bytes.
>>>
>>> There is no need in these interceptors, just use the C implementations from lib/string.c
>>> like you did in your first patch.
>>> The only thing that was wrong in the first patch is that assembly implementations
>>> were compiled out instead of being declared weak.
>>>
>> Well, at first I thought so..
>> I would remove diff code in /mm/kasan/kasan.c then use C implementations in lib/string.c
>> w/ assem implementations as weak :
>>
>> diff --git a/lib/string.c b/lib/string.c
>> index 2c0900a..a18b18f 100644
>> --- a/lib/string.c
>> +++ b/lib/string.c
>> @@ -312,7 +312,7 @@ size_t strlcat(char *dest, const char *src, size_t count)
>>  EXPORT_SYMBOL(strlcat);
>>  #endif
>>
>> -#ifndef __HAVE_ARCH_STRCMP
>> +#if (defined(CONFIG_ARM64) && defined(CONFIG_KASAN)) || !defined(__HAVE_ARCH_STRCMP)
> 
> No. What part of "like you did in your first patch" is unclear to you?

Just to be absolutely clear, I meant #ifdef out __HAVE_ARCH_* defines like it has been done in this patch
http://lkml.kernel.org/r/<1534233322-106271-1-git-send-email-kyeongdon.kim@lge.com>
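
In other words, the first-patch approach keeps the interceptors out of
mm/kasan/kasan.c entirely: when KASAN is enabled, the __HAVE_ARCH_* defines are
hidden so the generic, compiler-instrumented C versions from lib/string.c get
built, while the assembly routines are kept but declared weak rather than
compiled out (the flaw pointed out earlier). A rough sketch of the header side,
assuming this shape and not the exact hunk from that patch:

/* arch/arm64/include/asm/string.h (sketch) */
#ifndef CONFIG_KASAN
#define __HAVE_ARCH_STRCMP
extern int strcmp(const char *, const char *);

#define __HAVE_ARCH_STRNCMP
extern int strncmp(const char *, const char *, __kernel_size_t);
#endif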
Kyeongdon Kim Sept. 5, 2018, 7:44 a.m. UTC | #8
On 2018-09-05 1:24 AM, Andrey Ryabinin wrote:
>
>
> On 09/04/2018 01:10 PM, Andrey Ryabinin wrote:
> >
> >
> > On 09/04/2018 09:59 AM, Kyeongdon Kim wrote:
> >
> >>>> +#undef strncmp
> >>>> +int strncmp(const char *cs, const char *ct, size_t len)
> >>>> +{
> >>>> + check_memory_region((unsigned long)cs, len, false, _RET_IP_);
> >>>> + check_memory_region((unsigned long)ct, len, false, _RET_IP_);
> >>>
> >>> This will cause false positives. Both 'cs', and 'ct' could be less 
> than len bytes.
> >>>
> >>> There is no need in these interceptors, just use the C 
> implementations from lib/string.c
> >>> like you did in your first patch.
> >>> The only thing that was wrong in the first patch is that assembly 
> implementations
> >>> were compiled out instead of being declared weak.
> >>>
> >> Well, at first I thought so..
> >> I would remove diff code in /mm/kasan/kasan.c then use C 
> implementations in lib/string.c
> >> w/ assem implementations as weak :
> >>
> >> diff --git a/lib/string.c b/lib/string.c
> >> index 2c0900a..a18b18f 100644
> >> --- a/lib/string.c
> >> +++ b/lib/string.c
> >> @@ -312,7 +312,7 @@ size_t strlcat(char *dest, const char *src, 
> size_t count)
> >>  EXPORT_SYMBOL(strlcat);
> >>  #endif
> >>
> >> -#ifndef __HAVE_ARCH_STRCMP
> >> +#if (defined(CONFIG_ARM64) && defined(CONFIG_KASAN)) || 
> !defined(__HAVE_ARCH_STRCMP)
> >
> > No. What part of "like you did in your first patch" is unclear to you?
>
> Just to be absolutely clear, I meant #ifdef out __HAVE_ARCH_* defines 
> like it has been done in this patch
> http://lkml.kernel.org/r/<1534233322-106271-1-git-send-email-kyeongdon.kim@lge.com> 
>
I understood what you're saying, but I may have been thinking of the wrong patch.

So I'm thinking about the other way below:
we can pick up either the assembly variant or the C one, declaring them as weak.
---
diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index dd95d33..53a2ae0 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -22,11 +22,22 @@ extern char *strrchr(const char *, int c);
  #define __HAVE_ARCH_STRCHR
  extern char *strchr(const char *, int c);

+#ifdef CONFIG_KASAN
+extern int __strcmp(const char *, const char *);
+extern int __strncmp(const char *, const char *, __kernel_size_t);
+
+#ifndef __SANITIZE_ADDRESS__
+#define strcmp(cs, ct) __strcmp(cs, ct)
+#define strncmp(cs, ct, n) __strncmp(cs, ct, n)
+#endif
+
+#else
  #define __HAVE_ARCH_STRCMP
  extern int strcmp(const char *, const char *);

  #define __HAVE_ARCH_STRNCMP
  extern int strncmp(const char *, const char *, __kernel_size_t);
+#endif

  #define __HAVE_ARCH_STRLEN
  extern __kernel_size_t strlen(const char *);
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index d894a20..9aeffd5 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -50,6 +50,10 @@ EXPORT_SYMBOL(strcmp);
  EXPORT_SYMBOL(strncmp);
  EXPORT_SYMBOL(strlen);
  EXPORT_SYMBOL(strnlen);
+#ifdef CONFIG_KASAN
+EXPORT_SYMBOL(__strcmp);
+EXPORT_SYMBOL(__strncmp);
+#endif
  EXPORT_SYMBOL(memset);
  EXPORT_SYMBOL(memcpy);
  EXPORT_SYMBOL(memmove);
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index a820ed0..5ef7a57 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -110,6 +110,8 @@ __efistub___flush_dcache_area    = KALLSYMS_HIDE(__pi___flush_dcache_area);
  __efistub___memcpy        = KALLSYMS_HIDE(__pi_memcpy);
  __efistub___memmove        = KALLSYMS_HIDE(__pi_memmove);
  __efistub___memset        = KALLSYMS_HIDE(__pi_memset);
+__efistub___strcmp        = KALLSYMS_HIDE(__pi_strcmp);
+__efistub___strncmp        = KALLSYMS_HIDE(__pi_strncmp);
  #endif

  __efistub__text            = KALLSYMS_HIDE(_text);
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index 471fe61..0dffef7 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -60,6 +60,8 @@ tmp3        .req    x9
  zeroones    .req    x10
  pos        .req    x11

+.weak strcmp
+ENTRY(__strcmp)
  ENTRY(strcmp)
      eor    tmp1, src1, src2
      mov    zeroones, #REP8_01
@@ -232,3 +234,4 @@ CPU_BE(    orr    syndrome, diff, has_nul )
      sub    result, data1, data2, lsr #56
      ret
  ENDPIPROC(strcmp)
+ENDPROC(__strcmp)
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index e267044..b2648c7 100644
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -64,6 +64,8 @@ limit_wd    .req    x13
  mask        .req    x14
  endloop        .req    x15

+.weak strncmp
+ENTRY(__strncmp)
  ENTRY(strncmp)
      cbz    limit, .Lret0
      eor    tmp1, src1, src2
@@ -308,3 +310,4 @@ CPU_BE( orr    syndrome, diff, has_nul )
      mov    result, #0
      ret
  ENDPIPROC(strncmp)
+ENDPROC(__strncmp)
Andrey Ryabinin Sept. 6, 2018, 5:06 p.m. UTC | #9
On 09/05/2018 10:44 AM, Kyeongdon Kim wrote:
> 
> 
> On 2018-09-05 1:24 AM, Andrey Ryabinin wrote:
>>
>>
>> On 09/04/2018 01:10 PM, Andrey Ryabinin wrote:
>> >
>> >
>> > On 09/04/2018 09:59 AM, Kyeongdon Kim wrote:
>> >
>> >>>> +#undef strncmp
>> >>>> +int strncmp(const char *cs, const char *ct, size_t len)
>> >>>> +{
>> >>>> + check_memory_region((unsigned long)cs, len, false, _RET_IP_);
>> >>>> + check_memory_region((unsigned long)ct, len, false, _RET_IP_);
>> >>>
>> >>> This will cause false positives. Both 'cs', and 'ct' could be less than len bytes.
>> >>>
>> >>> There is no need in these interceptors, just use the C implementations from lib/string.c
>> >>> like you did in your first patch.
>> >>> The only thing that was wrong in the first patch is that assembly implementations
>> >>> were compiled out instead of being declared weak.
>> >>>
>> >> Well, at first I thought so..
>> >> I would remove diff code in /mm/kasan/kasan.c then use C implementations in lib/string.c
>> >> w/ assem implementations as weak :
>> >>
>> >> diff --git a/lib/string.c b/lib/string.c
>> >> index 2c0900a..a18b18f 100644
>> >> --- a/lib/string.c
>> >> +++ b/lib/string.c
>> >> @@ -312,7 +312,7 @@ size_t strlcat(char *dest, const char *src, size_t count)
>> >>  EXPORT_SYMBOL(strlcat);
>> >>  #endif
>> >>
>> >> -#ifndef __HAVE_ARCH_STRCMP
>> >> +#if (defined(CONFIG_ARM64) && defined(CONFIG_KASAN)) || !defined(__HAVE_ARCH_STRCMP)
>> >
>> > No. What part of "like you did in your first patch" is unclear to you?
>>
>> Just to be absolutely clear, I meant #ifdef out __HAVE_ARCH_* defines like it has been done in this patch
>> http://lkml.kernel.org/r/<1534233322-106271-1-git-send-email-kyeongdon.kim@lge.com>
> I understood what you're saying, but I may have been thinking of the wrong patch.
> 
> So I'm thinking about the other way below:
> we can pick up either the assembly variant or the C one, declaring them as weak.


It was much easier for me to explain with a patch how this should be done, in my opinion.
So I just sent the patches, take a look.

Patch

==================================================================
BUG: KASAN: use-after-free in platform_match+0x1c/0x5c at addr ffffffc0ad313500
Read of size 1 by task swapper/0/1
CPU: 3 PID: 1 Comm: swapper/0 Tainted: G    B           4.9.77+ #1
Hardware name: Generic (DT) based system
Call trace:
 dump_backtrace+0x0/0x2e0
 show_stack+0x14/0x1c
 dump_stack+0x88/0xb0
 kasan_object_err+0x24/0x7c
 kasan_report+0x2f0/0x484
 check_memory_region+0x20/0x14c
 strcmp+0x1c/0x5c
 platform_match+0x40/0xe4
 __driver_attach+0x40/0x130
 bus_for_each_dev+0xc4/0xe0
 driver_attach+0x30/0x3c
 bus_add_driver+0x2dc/0x328
 driver_register+0x118/0x160
 __platform_driver_register+0x7c/0x88
 alarmtimer_init+0x154/0x1e4
 do_one_initcall+0x184/0x1a4
 kernel_init_freeable+0x2ec/0x2f0
 kernel_init+0x18/0x10c
 ret_from_fork+0x10/0x50

In the case of xtensa and x86_64 KASAN, this patch is not needed at the moment.

Signed-off-by: Kyeongdon Kim <kyeongdon.kim@lge.com>
---
 arch/arm64/include/asm/string.h |  5 +++++
 arch/arm64/kernel/arm64ksyms.c  |  2 ++
 arch/arm64/kernel/image.h       |  2 ++
 arch/arm64/lib/strcmp.S         |  3 +++
 arch/arm64/lib/strncmp.S        |  3 +++
 mm/kasan/kasan.c                | 23 +++++++++++++++++++++++
 6 files changed, 38 insertions(+)

diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index dd95d33..ab60349 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -24,9 +24,11 @@  extern char *strchr(const char *, int c);
 
 #define __HAVE_ARCH_STRCMP
 extern int strcmp(const char *, const char *);
+extern int __strcmp(const char *, const char *);
 
 #define __HAVE_ARCH_STRNCMP
 extern int strncmp(const char *, const char *, __kernel_size_t);
+extern int __strncmp(const char *, const char *, __kernel_size_t);
 
 #define __HAVE_ARCH_STRLEN
 extern __kernel_size_t strlen(const char *);
@@ -68,6 +70,9 @@  void memcpy_flushcache(void *dst, const void *src, size_t cnt);
 #define memmove(dst, src, len) __memmove(dst, src, len)
 #define memset(s, c, n) __memset(s, c, n)
 
+#define strcmp(cs, ct) __strcmp(cs, ct)
+#define strncmp(cs, ct, n) __strncmp(cs, ct, n)
+
 #ifndef __NO_FORTIFY
 #define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
 #endif
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index d894a20..10b1164 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -50,6 +50,8 @@  EXPORT_SYMBOL(strcmp);
 EXPORT_SYMBOL(strncmp);
 EXPORT_SYMBOL(strlen);
 EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(__strcmp);
+EXPORT_SYMBOL(__strncmp);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index a820ed0..5ef7a57 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -110,6 +110,8 @@  __efistub___flush_dcache_area	= KALLSYMS_HIDE(__pi___flush_dcache_area);
 __efistub___memcpy		= KALLSYMS_HIDE(__pi_memcpy);
 __efistub___memmove		= KALLSYMS_HIDE(__pi_memmove);
 __efistub___memset		= KALLSYMS_HIDE(__pi_memset);
+__efistub___strcmp		= KALLSYMS_HIDE(__pi_strcmp);
+__efistub___strncmp		= KALLSYMS_HIDE(__pi_strncmp);
 #endif
 
 __efistub__text			= KALLSYMS_HIDE(_text);
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index 471fe61..0dffef7 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -60,6 +60,8 @@  tmp3		.req	x9
 zeroones	.req	x10
 pos		.req	x11
 
+.weak strcmp
+ENTRY(__strcmp)
 ENTRY(strcmp)
 	eor	tmp1, src1, src2
 	mov	zeroones, #REP8_01
@@ -232,3 +234,4 @@  CPU_BE(	orr	syndrome, diff, has_nul )
 	sub	result, data1, data2, lsr #56
 	ret
 ENDPIPROC(strcmp)
+ENDPROC(__strcmp)
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index e267044..b2648c7 100644
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -64,6 +64,8 @@  limit_wd	.req	x13
 mask		.req	x14
 endloop		.req	x15
 
+.weak strncmp
+ENTRY(__strncmp)
 ENTRY(strncmp)
 	cbz	limit, .Lret0
 	eor	tmp1, src1, src2
@@ -308,3 +310,4 @@  CPU_BE( orr	syndrome, diff, has_nul )
 	mov	result, #0
 	ret
 ENDPIPROC(strncmp)
+ENDPROC(__strncmp)
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index c3bd520..61ad7f1 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -304,6 +304,29 @@  void *memcpy(void *dest, const void *src, size_t len)
 
 	return __memcpy(dest, src, len);
 }
+#ifdef CONFIG_ARM64
+/*
+ * Arch arm64 use assembly variant for strcmp/strncmp,
+ * xtensa use inline asm operations and x86_64 use c one,
+ * so now this interceptors only for arm64 kasan.
+ */
+#undef strcmp
+int strcmp(const char *cs, const char *ct)
+{
+	check_memory_region((unsigned long)cs, 1, false, _RET_IP_);
+	check_memory_region((unsigned long)ct, 1, false, _RET_IP_);
+
+	return __strcmp(cs, ct);
+}
+#undef strncmp
+int strncmp(const char *cs, const char *ct, size_t len)
+{
+	check_memory_region((unsigned long)cs, len, false, _RET_IP_);
+	check_memory_region((unsigned long)ct, len, false, _RET_IP_);
+
+	return __strncmp(cs, ct, len);
+}
+#endif
 
 void kasan_alloc_pages(struct page *page, unsigned int order)
 {