diff mbox series

mm: instrument copy_from/to_kernel_nofault

Message ID 20240927151438.2143936-1-snovitoll@gmail.com (mailing list archive)
State New
Headers show
Series mm: instrument copy_from/to_kernel_nofault | expand

Commit Message

Sabyrzhan Tasbolatov Sept. 27, 2024, 3:14 p.m. UTC
Instrument copy_from_kernel_nofault(), copy_to_kernel_nofault()
with instrument_memcpy_before() for KASAN, KCSAN checks and
instrument_memcpy_after() for KMSAN.

Tested on x86_64 and arm64 with CONFIG_KASAN_SW_TAGS.
On arm64 with CONFIG_KASAN_HW_TAGS, the kunit test currently fails.
This needs more clarification; it is currently disabled in the kunit test.

Reported-by: Andrey Konovalov <andreyknvl@gmail.com>
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=210505
Signed-off-by: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
---
 mm/kasan/kasan_test.c | 31 +++++++++++++++++++++++++++++++
 mm/maccess.c          |  8 ++++++--
 2 files changed, 37 insertions(+), 2 deletions(-)

Comments

Marco Elver Oct. 2, 2024, 3:59 p.m. UTC | #1
On Fri, 27 Sept 2024 at 17:14, Sabyrzhan Tasbolatov <snovitoll@gmail.com> wrote:
>
> Instrument copy_from_kernel_nofault(), copy_to_kernel_nofault()
> with instrument_memcpy_before() for KASAN, KCSAN checks and
> instrument_memcpy_after() for KMSAN.

There's a fundamental problem with instrumenting
copy_from_kernel_nofault() - it's meant to be a non-faulting helper,
i.e. if it attempts to read arbitrary kernel addresses, that's not a
problem because it won't fault and BUG. These may be used in places
that probe random memory, and KASAN may say that some memory is
invalid and generate a report - but in reality that's not a problem.

In the Bugzilla bug, Andrey wrote:

> KASAN should check both arguments of copy_from/to_kernel_nofault() for accessibility when both are fault-safe.

I don't see this patch doing it, or at least it's not explained. By
looking at the code, I see that it does the instrument_memcpy_before()
right after pagefault_disable(), which tells me that KASAN or other
tools will complain if a page is not faulted in. These helpers are
meant to be usable like that - despite their inherent unsafety,
there's little that I see that KASAN can help with.

What _might_ be useful, is detecting copying faulted-in but
uninitialized memory to user space. So I think the only
instrumentation we want to retain is KMSAN instrumentation for the
copy_from_kernel_nofault() helper, and only if no fault was
encountered.

Instrumenting copy_to_kernel_nofault() may be helpful to catch memory
corruptions, but only if faulted-in memory was accessed.



> Tested on x86_64 and arm64 with CONFIG_KASAN_SW_TAGS.
> On arm64 with CONFIG_KASAN_HW_TAGS, kunit test currently fails.
> Need more clarification on it - currently, disabled in kunit test.
>
> Reported-by: Andrey Konovalov <andreyknvl@gmail.com>
> Closes: https://bugzilla.kernel.org/show_bug.cgi?id=210505
> Signed-off-by: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
> ---
>  mm/kasan/kasan_test.c | 31 +++++++++++++++++++++++++++++++
>  mm/maccess.c          |  8 ++++++--
>  2 files changed, 37 insertions(+), 2 deletions(-)
>
> diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
> index 567d33b49..329d81518 100644
> --- a/mm/kasan/kasan_test.c
> +++ b/mm/kasan/kasan_test.c
> @@ -1944,6 +1944,36 @@ static void match_all_mem_tag(struct kunit *test)
>         kfree(ptr);
>  }
>
> +static void copy_from_to_kernel_nofault_oob(struct kunit *test)
> +{
> +       char *ptr;
> +       char buf[128];
> +       size_t size = sizeof(buf);
> +
> +       /* Not detecting fails currently with HW_TAGS */
> +       KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
> +
> +       ptr = kmalloc(size - KASAN_GRANULE_SIZE, GFP_KERNEL);
> +       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> +       OPTIMIZER_HIDE_VAR(ptr);
> +
> +       if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) {
> +               /* Check that the returned pointer is tagged. */
> +               KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
> +               KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
> +       }
> +
> +       KUNIT_EXPECT_KASAN_FAIL(test,
> +               copy_from_kernel_nofault(&buf[0], ptr, size));
> +       KUNIT_EXPECT_KASAN_FAIL(test,
> +               copy_from_kernel_nofault(ptr, &buf[0], size));
> +       KUNIT_EXPECT_KASAN_FAIL(test,
> +               copy_to_kernel_nofault(&buf[0], ptr, size));
> +       KUNIT_EXPECT_KASAN_FAIL(test,
> +               copy_to_kernel_nofault(ptr, &buf[0], size));
> +       kfree(ptr);
> +}
> +
>  static struct kunit_case kasan_kunit_test_cases[] = {
>         KUNIT_CASE(kmalloc_oob_right),
>         KUNIT_CASE(kmalloc_oob_left),
> @@ -2017,6 +2047,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
>         KUNIT_CASE(match_all_not_assigned),
>         KUNIT_CASE(match_all_ptr_tag),
>         KUNIT_CASE(match_all_mem_tag),
> +       KUNIT_CASE(copy_from_to_kernel_nofault_oob),
>         {}
>  };
>
> diff --git a/mm/maccess.c b/mm/maccess.c
> index 518a25667..2c4251df4 100644
> --- a/mm/maccess.c
> +++ b/mm/maccess.c
> @@ -15,7 +15,7 @@ bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
>
>  #define copy_from_kernel_nofault_loop(dst, src, len, type, err_label)  \
>         while (len >= sizeof(type)) {                                   \
> -               __get_kernel_nofault(dst, src, type, err_label);                \
> +               __get_kernel_nofault(dst, src, type, err_label);        \
>                 dst += sizeof(type);                                    \
>                 src += sizeof(type);                                    \
>                 len -= sizeof(type);                                    \
> @@ -32,6 +32,7 @@ long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
>                 return -ERANGE;
>
>         pagefault_disable();
> +       instrument_memcpy_before(dst, src, size);
>         if (!(align & 7))
>                 copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
>         if (!(align & 3))
> @@ -39,6 +40,7 @@ long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
>         if (!(align & 1))
>                 copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
>         copy_from_kernel_nofault_loop(dst, src, size, u8, Efault);
> +       instrument_memcpy_after(dst, src, size, 0);
>         pagefault_enable();
>         return 0;
>  Efault:
> @@ -49,7 +51,7 @@ EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
>
>  #define copy_to_kernel_nofault_loop(dst, src, len, type, err_label)    \
>         while (len >= sizeof(type)) {                                   \
> -               __put_kernel_nofault(dst, src, type, err_label);                \
> +               __put_kernel_nofault(dst, src, type, err_label);        \
>                 dst += sizeof(type);                                    \
>                 src += sizeof(type);                                    \
>                 len -= sizeof(type);                                    \
> @@ -63,6 +65,7 @@ long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
>                 align = (unsigned long)dst | (unsigned long)src;
>
>         pagefault_disable();
> +       instrument_memcpy_before(dst, src, size);
>         if (!(align & 7))
>                 copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
>         if (!(align & 3))
> @@ -70,6 +73,7 @@ long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
>         if (!(align & 1))
>                 copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
>         copy_to_kernel_nofault_loop(dst, src, size, u8, Efault);
> +       instrument_memcpy_after(dst, src, size, 0);
>         pagefault_enable();
>         return 0;
>  Efault:
> --
> 2.34.1
>
> --
> You received this message because you are subscribed to the Google Groups "kasan-dev" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to kasan-dev+unsubscribe@googlegroups.com.
> To view this discussion on the web visit https://groups.google.com/d/msgid/kasan-dev/20240927151438.2143936-1-snovitoll%40gmail.com.
Sabyrzhan Tasbolatov Oct. 2, 2024, 4:39 p.m. UTC | #2
On Wed, Oct 2, 2024 at 9:00 PM Marco Elver <elver@google.com> wrote:
>
> On Fri, 27 Sept 2024 at 17:14, Sabyrzhan Tasbolatov <snovitoll@gmail.com> wrote:
> >
> > Instrument copy_from_kernel_nofault(), copy_to_kernel_nofault()
> > with instrument_memcpy_before() for KASAN, KCSAN checks and
> > instrument_memcpy_after() for KMSAN.
>
> There's a fundamental problem with instrumenting
> copy_from_kernel_nofault() - it's meant to be a non-faulting helper,
> i.e. if it attempts to read arbitrary kernel addresses, that's not a
> problem because it won't fault and BUG. These may be used in places
> that probe random memory, and KASAN may say that some memory is
> invalid and generate a report - but in reality that's not a problem.
>
> In the Bugzilla bug, Andrey wrote:
>
> > KASAN should check both arguments of copy_from/to_kernel_nofault() for accessibility when both are fault-safe.
>
> I don't see this patch doing it, or at least it's not explained. By
> looking at the code, I see that it does the instrument_memcpy_before()
> right after pagefault_disable(), which tells me that KASAN or other
> tools will complain if a page is not faulted in. These helpers are
> meant to be usable like that - despite their inherent unsafety,
> there's little that I see that KASAN can help with.

Hello, thanks for the comment!
instrument_memcpy_before() has been replaced with
instrument_read() and instrument_write() in
commit 9e3f2b1ecdd4 ("mm, kasan: proper instrument _kernel_nofault"),
and there are KASAN, KCSAN checks.

> What _might_ be useful, is detecting copying faulted-in but
> uninitialized memory to user space. So I think the only
> instrumentation we want to retain is KMSAN instrumentation for the
> copy_from_kernel_nofault() helper, and only if no fault was
> encountered.
>
> Instrumenting copy_to_kernel_nofault() may be helpful to catch memory
> corruptions, but only if faulted-in memory was accessed.

If we need to have KMSAN only instrumentation for
copy_from_user_nofault(), then AFAIU, in mm/kasan/kasan_test.c
copy_from_to_kernel_nofault_oob() should have only
copy_to_kernel_nofault() OOB kunit test to trigger KASAN.
And copy_from_user_nofault() kunit test can be placed in mm/kmsan/kmsan_test.c.

I wonder if instrument_get_user macro is OK for src ptr in
copy_from_kernel_nofault().

If this understanding is correct, then there is no need to add
kasan_disable_current() and
kasan_enable_current() for the kernel helper functions that use
copy_from_kernel_nofault().

>
>
> > Tested on x86_64 and arm64 with CONFIG_KASAN_SW_TAGS.
> > On arm64 with CONFIG_KASAN_HW_TAGS, kunit test currently fails.
> > Need more clarification on it - currently, disabled in kunit test.
> >
> > Reported-by: Andrey Konovalov <andreyknvl@gmail.com>
> > Closes: https://bugzilla.kernel.org/show_bug.cgi?id=210505
> > Signed-off-by: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
> > ---
> >  mm/kasan/kasan_test.c | 31 +++++++++++++++++++++++++++++++
> >  mm/maccess.c          |  8 ++++++--
> >  2 files changed, 37 insertions(+), 2 deletions(-)
> >
> > diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
> > index 567d33b49..329d81518 100644
> > --- a/mm/kasan/kasan_test.c
> > +++ b/mm/kasan/kasan_test.c
> > @@ -1944,6 +1944,36 @@ static void match_all_mem_tag(struct kunit *test)
> >         kfree(ptr);
> >  }
> >
> > +static void copy_from_to_kernel_nofault_oob(struct kunit *test)
> > +{
> > +       char *ptr;
> > +       char buf[128];
> > +       size_t size = sizeof(buf);
> > +
> > +       /* Not detecting fails currently with HW_TAGS */
> > +       KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
> > +
> > +       ptr = kmalloc(size - KASAN_GRANULE_SIZE, GFP_KERNEL);
> > +       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> > +       OPTIMIZER_HIDE_VAR(ptr);
> > +
> > +       if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) {
> > +               /* Check that the returned pointer is tagged. */
> > +               KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
> > +               KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
> > +       }
> > +
> > +       KUNIT_EXPECT_KASAN_FAIL(test,
> > +               copy_from_kernel_nofault(&buf[0], ptr, size));
> > +       KUNIT_EXPECT_KASAN_FAIL(test,
> > +               copy_from_kernel_nofault(ptr, &buf[0], size));
> > +       KUNIT_EXPECT_KASAN_FAIL(test,
> > +               copy_to_kernel_nofault(&buf[0], ptr, size));
> > +       KUNIT_EXPECT_KASAN_FAIL(test,
> > +               copy_to_kernel_nofault(ptr, &buf[0], size));
> > +       kfree(ptr);
> > +}
> > +
> >  static struct kunit_case kasan_kunit_test_cases[] = {
> >         KUNIT_CASE(kmalloc_oob_right),
> >         KUNIT_CASE(kmalloc_oob_left),
> > @@ -2017,6 +2047,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
> >         KUNIT_CASE(match_all_not_assigned),
> >         KUNIT_CASE(match_all_ptr_tag),
> >         KUNIT_CASE(match_all_mem_tag),
> > +       KUNIT_CASE(copy_from_to_kernel_nofault_oob),
> >         {}
> >  };
> >
> > diff --git a/mm/maccess.c b/mm/maccess.c
> > index 518a25667..2c4251df4 100644
> > --- a/mm/maccess.c
> > +++ b/mm/maccess.c
> > @@ -15,7 +15,7 @@ bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
> >
> >  #define copy_from_kernel_nofault_loop(dst, src, len, type, err_label)  \
> >         while (len >= sizeof(type)) {                                   \
> > -               __get_kernel_nofault(dst, src, type, err_label);                \
> > +               __get_kernel_nofault(dst, src, type, err_label);        \
> >                 dst += sizeof(type);                                    \
> >                 src += sizeof(type);                                    \
> >                 len -= sizeof(type);                                    \
> > @@ -32,6 +32,7 @@ long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
> >                 return -ERANGE;
> >
> >         pagefault_disable();
> > +       instrument_memcpy_before(dst, src, size);
> >         if (!(align & 7))
> >                 copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
> >         if (!(align & 3))
> > @@ -39,6 +40,7 @@ long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
> >         if (!(align & 1))
> >                 copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
> >         copy_from_kernel_nofault_loop(dst, src, size, u8, Efault);
> > +       instrument_memcpy_after(dst, src, size, 0);
> >         pagefault_enable();
> >         return 0;
> >  Efault:
> > @@ -49,7 +51,7 @@ EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
> >
> >  #define copy_to_kernel_nofault_loop(dst, src, len, type, err_label)    \
> >         while (len >= sizeof(type)) {                                   \
> > -               __put_kernel_nofault(dst, src, type, err_label);                \
> > +               __put_kernel_nofault(dst, src, type, err_label);        \
> >                 dst += sizeof(type);                                    \
> >                 src += sizeof(type);                                    \
> >                 len -= sizeof(type);                                    \
> > @@ -63,6 +65,7 @@ long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
> >                 align = (unsigned long)dst | (unsigned long)src;
> >
> >         pagefault_disable();
> > +       instrument_memcpy_before(dst, src, size);
> >         if (!(align & 7))
> >                 copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
> >         if (!(align & 3))
> > @@ -70,6 +73,7 @@ long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
> >         if (!(align & 1))
> >                 copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
> >         copy_to_kernel_nofault_loop(dst, src, size, u8, Efault);
> > +       instrument_memcpy_after(dst, src, size, 0);
> >         pagefault_enable();
> >         return 0;
> >  Efault:
> > --
> > 2.34.1
> >
> > --
> > You received this message because you are subscribed to the Google Groups "kasan-dev" group.
> > To unsubscribe from this group and stop receiving emails from it, send an email to kasan-dev+unsubscribe@googlegroups.com.
> > To view this discussion on the web visit https://groups.google.com/d/msgid/kasan-dev/20240927151438.2143936-1-snovitoll%40gmail.com.
Marco Elver Oct. 4, 2024, 6:55 a.m. UTC | #3
On Wed, 2 Oct 2024 at 18:40, Sabyrzhan Tasbolatov <snovitoll@gmail.com> wrote:
>
> On Wed, Oct 2, 2024 at 9:00 PM Marco Elver <elver@google.com> wrote:
> >
> > On Fri, 27 Sept 2024 at 17:14, Sabyrzhan Tasbolatov <snovitoll@gmail.com> wrote:
> > >
> > > Instrument copy_from_kernel_nofault(), copy_to_kernel_nofault()
> > > with instrument_memcpy_before() for KASAN, KCSAN checks and
> > > instrument_memcpy_after() for KMSAN.
> >
> > There's a fundamental problem with instrumenting
> > copy_from_kernel_nofault() - it's meant to be a non-faulting helper,
> > i.e. if it attempts to read arbitrary kernel addresses, that's not a
> > problem because it won't fault and BUG. These may be used in places
> > that probe random memory, and KASAN may say that some memory is
> > invalid and generate a report - but in reality that's not a problem.
> >
> > In the Bugzilla bug, Andrey wrote:
> >
> > > KASAN should check both arguments of copy_from/to_kernel_nofault() for accessibility when both are fault-safe.
> >
> > I don't see this patch doing it, or at least it's not explained. By
> > looking at the code, I see that it does the instrument_memcpy_before()
> > right after pagefault_disable(), which tells me that KASAN or other
> > tools will complain if a page is not faulted in. These helpers are
> > meant to be usable like that - despite their inherent unsafety,
> > there's little that I see that KASAN can help with.
>
> Hello, thanks for the comment!
> instrument_memcpy_before() has been replaced with
> instrument_read() and instrument_write() in
> commit 9e3f2b1ecdd4("mm, kasan: proper instrument _kernel_nofault"),
> and there are KASAN, KCSAN checks.
>
> > What _might_ be useful, is detecting copying faulted-in but
> > uninitialized memory to user space. So I think the only
> > instrumentation we want to retain is KMSAN instrumentation for the
> > copy_from_kernel_nofault() helper, and only if no fault was
> > encountered.
> >
> > Instrumenting copy_to_kernel_nofault() may be helpful to catch memory
> > corruptions, but only if faulted-in memory was accessed.
>
> If we need to have KMSAN only instrumentation for
> copy_from_user_nofault(), then AFAIU, in mm/kasan/kasan_test.c

Did you mean s/copy_from_user_nofault/copy_from_kernel_nofault/?

> copy_from_to_kernel_nofault_oob() should have only
> copy_to_kernel_nofault() OOB kunit test to trigger KASAN.
> And copy_from_user_nofault() kunit test can be placed in mm/kmsan/kmsan_test.c.

I think in the interest of reducing false positives, I'd proceed with
making copy_from_kernel_nofault() KMSAN only.
Sabyrzhan Tasbolatov Oct. 4, 2024, 12:37 p.m. UTC | #4
On Fri, Oct 4, 2024 at 11:55 AM Marco Elver <elver@google.com> wrote:
>
> On Wed, 2 Oct 2024 at 18:40, Sabyrzhan Tasbolatov <snovitoll@gmail.com> wrote:
> >
> > On Wed, Oct 2, 2024 at 9:00 PM Marco Elver <elver@google.com> wrote:
> > >
> > > On Fri, 27 Sept 2024 at 17:14, Sabyrzhan Tasbolatov <snovitoll@gmail.com> wrote:
> > > >
> > > > Instrument copy_from_kernel_nofault(), copy_to_kernel_nofault()
> > > > with instrument_memcpy_before() for KASAN, KCSAN checks and
> > > > instrument_memcpy_after() for KMSAN.
> > >
> > > There's a fundamental problem with instrumenting
> > > copy_from_kernel_nofault() - it's meant to be a non-faulting helper,
> > > i.e. if it attempts to read arbitrary kernel addresses, that's not a
> > > problem because it won't fault and BUG. These may be used in places
> > > that probe random memory, and KASAN may say that some memory is
> > > invalid and generate a report - but in reality that's not a problem.
> > >
> > > In the Bugzilla bug, Andrey wrote:
> > >
> > > > KASAN should check both arguments of copy_from/to_kernel_nofault() for accessibility when both are fault-safe.
> > >
> > > I don't see this patch doing it, or at least it's not explained. By
> > > looking at the code, I see that it does the instrument_memcpy_before()
> > > right after pagefault_disable(), which tells me that KASAN or other
> > > tools will complain if a page is not faulted in. These helpers are
> > > meant to be usable like that - despite their inherent unsafety,
> > > there's little that I see that KASAN can help with.
> >
> > Hello, thanks for the comment!
> > instrument_memcpy_before() has been replaced with
> > instrument_read() and instrument_write() in
> > commit 9e3f2b1ecdd4 ("mm, kasan: proper instrument _kernel_nofault"),
> > and there are KASAN, KCSAN checks.
> >
> > > What _might_ be useful, is detecting copying faulted-in but
> > > uninitialized memory to user space. So I think the only
> > > instrumentation we want to retain is KMSAN instrumentation for the
> > > copy_from_kernel_nofault() helper, and only if no fault was
> > > encountered.
> > >
> > > Instrumenting copy_to_kernel_nofault() may be helpful to catch memory
> > > corruptions, but only if faulted-in memory was accessed.
> >
> > If we need to have KMSAN only instrumentation for
> > copy_from_user_nofault(), then AFAIU, in mm/kasan/kasan_test.c
>
> Did you mean s/copy_from_user_nofault/copy_from_kernel_nofault/?
Yes, typo, sorry for the confusion.

>
> > copy_from_to_kernel_nofault_oob() should have only
> > copy_to_kernel_nofault() OOB kunit test to trigger KASAN.
> > And copy_from_user_nofault() kunit test can be placed in mm/kmsan/kmsan_test.c.
>
> I think in the interest of reducing false positives, I'd proceed with
> making copy_from_kernel_nofault() KMSAN only.

Here is my current upcoming patch, which I will send separately
once it's tested; it's slowly being compiled on my laptop (I'm away from my PC).
I've moved copy_from_kernel_nofault() to kmsan_test.c and added
kmsan_check_memory() _before_ pagefault_disable() to check
whether the kernel src address is initialized.
For copy_to_kernel_nofault(), I've kept instrument_write() for the memory
corruption check, but moved it before pagefault_disable() as well, if I understood
the logic correctly. I will adjust the kmsan kunit test once I can run it and send a PATCH.
Meanwhile, please let me know if the order of instrumentation before
pagefault_disable()
is correct.

> By looking at the code, I see that it does the instrument_memcpy_before()
> right after pagefault_disable(), which tells me that KASAN or other
> tools will complain if a page is not faulted in. These helpers are
> meant to be usable like that - despite their inherent unsafety,
> there's little that I see that KASAN can help with.
---
 mm/kasan/kasan_test_c.c |  8 ++------
 mm/kmsan/kmsan_test.c   | 16 ++++++++++++++++
 mm/maccess.c            |  5 +++--
 3 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c
index 0a226ab032d..5cff90f831d 100644
--- a/mm/kasan/kasan_test_c.c
+++ b/mm/kasan/kasan_test_c.c
@@ -1954,7 +1954,7 @@ static void rust_uaf(struct kunit *test)
  KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
 }

-static void copy_from_to_kernel_nofault_oob(struct kunit *test)
+static void copy_to_kernel_nofault_oob(struct kunit *test)
 {
  char *ptr;
  char buf[128];
@@ -1973,10 +1973,6 @@ static void
copy_from_to_kernel_nofault_oob(struct kunit *test)
  KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
  }

- KUNIT_EXPECT_KASAN_FAIL(test,
- copy_from_kernel_nofault(&buf[0], ptr, size));
- KUNIT_EXPECT_KASAN_FAIL(test,
- copy_from_kernel_nofault(ptr, &buf[0], size));
  KUNIT_EXPECT_KASAN_FAIL(test,
  copy_to_kernel_nofault(&buf[0], ptr, size));
  KUNIT_EXPECT_KASAN_FAIL(test,
@@ -2057,7 +2053,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
  KUNIT_CASE(match_all_not_assigned),
  KUNIT_CASE(match_all_ptr_tag),
  KUNIT_CASE(match_all_mem_tag),
- KUNIT_CASE(copy_from_to_kernel_nofault_oob),
+ KUNIT_CASE(copy_to_kernel_nofault_oob),
  KUNIT_CASE(rust_uaf),
  {}
 };
diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
index 13236d579eb..fc50d0aef47 100644
--- a/mm/kmsan/kmsan_test.c
+++ b/mm/kmsan/kmsan_test.c
@@ -640,6 +640,21 @@ static void test_unpoison_memory(struct kunit *test)
  KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }

+static void test_copy_from_kernel_nofault(struct kunit *test)
+{
+ long ret;
+ volatile char src[4], dst[4];
+
+ EXPECTATION_UNINIT_VALUE_FN(expect, "test_copy_from_kernel_nofault");
+ kunit_info(
+ test,
+ "testing copy_from_kernel_nofault with src uninitialized memory\n");
+
+ ret = copy_from_kernel_nofault(dst, src, sizeof(src));
+ USE(ret);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
 static struct kunit_case kmsan_test_cases[] = {
  KUNIT_CASE(test_uninit_kmalloc),
  KUNIT_CASE(test_init_kmalloc),
@@ -664,6 +679,7 @@ static struct kunit_case kmsan_test_cases[] = {
  KUNIT_CASE(test_long_origin_chain),
  KUNIT_CASE(test_stackdepot_roundtrip),
  KUNIT_CASE(test_unpoison_memory),
+ KUNIT_CASE(test_copy_from_kernel_nofault),
  {},
 };

diff --git a/mm/maccess.c b/mm/maccess.c
index f752f0c0fa3..a91a39a56cf 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -31,8 +31,9 @@ long copy_from_kernel_nofault(void *dst, const void
*src, size_t size)
  if (!copy_from_kernel_nofault_allowed(src, size))
  return -ERANGE;

+ /* Make sure uninitialized kernel memory isn't copied. */
+ kmsan_check_memory(src, size);
  pagefault_disable();
- instrument_read(src, size);
  if (!(align & 7))
  copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
  if (!(align & 3))
@@ -63,8 +64,8 @@ long copy_to_kernel_nofault(void *dst, const void
*src, size_t size)
  if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
  align = (unsigned long)dst | (unsigned long)src;

- pagefault_disable();
  instrument_write(dst, size);
+ pagefault_disable();
  if (!(align & 7))
  copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
  if (!(align & 3))
diff mbox series

Patch

diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 567d33b49..329d81518 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -1944,6 +1944,36 @@  static void match_all_mem_tag(struct kunit *test)
 	kfree(ptr);
 }
 
+static void copy_from_to_kernel_nofault_oob(struct kunit *test)
+{
+	char *ptr;
+	char buf[128];
+	size_t size = sizeof(buf);
+
+	/* Not detecting fails currently with HW_TAGS */
+	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
+
+	ptr = kmalloc(size - KASAN_GRANULE_SIZE, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+	OPTIMIZER_HIDE_VAR(ptr);
+
+	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) {
+		/* Check that the returned pointer is tagged. */
+		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
+		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
+	}
+
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		copy_from_kernel_nofault(&buf[0], ptr, size));
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		copy_from_kernel_nofault(ptr, &buf[0], size));
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		copy_to_kernel_nofault(&buf[0], ptr, size));
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		copy_to_kernel_nofault(ptr, &buf[0], size));
+	kfree(ptr);
+}
+
 static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kmalloc_oob_right),
 	KUNIT_CASE(kmalloc_oob_left),
@@ -2017,6 +2047,7 @@  static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(match_all_not_assigned),
 	KUNIT_CASE(match_all_ptr_tag),
 	KUNIT_CASE(match_all_mem_tag),
+	KUNIT_CASE(copy_from_to_kernel_nofault_oob),
 	{}
 };
 
diff --git a/mm/maccess.c b/mm/maccess.c
index 518a25667..2c4251df4 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -15,7 +15,7 @@  bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
 
 #define copy_from_kernel_nofault_loop(dst, src, len, type, err_label)	\
 	while (len >= sizeof(type)) {					\
-		__get_kernel_nofault(dst, src, type, err_label);		\
+		__get_kernel_nofault(dst, src, type, err_label);	\
 		dst += sizeof(type);					\
 		src += sizeof(type);					\
 		len -= sizeof(type);					\
@@ -32,6 +32,7 @@  long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
 		return -ERANGE;
 
 	pagefault_disable();
+	instrument_memcpy_before(dst, src, size);
 	if (!(align & 7))
 		copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
 	if (!(align & 3))
@@ -39,6 +40,7 @@  long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
 	if (!(align & 1))
 		copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
 	copy_from_kernel_nofault_loop(dst, src, size, u8, Efault);
+	instrument_memcpy_after(dst, src, size, 0);
 	pagefault_enable();
 	return 0;
 Efault:
@@ -49,7 +51,7 @@  EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
 
 #define copy_to_kernel_nofault_loop(dst, src, len, type, err_label)	\
 	while (len >= sizeof(type)) {					\
-		__put_kernel_nofault(dst, src, type, err_label);		\
+		__put_kernel_nofault(dst, src, type, err_label);	\
 		dst += sizeof(type);					\
 		src += sizeof(type);					\
 		len -= sizeof(type);					\
@@ -63,6 +65,7 @@  long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
 		align = (unsigned long)dst | (unsigned long)src;
 
 	pagefault_disable();
+	instrument_memcpy_before(dst, src, size);
 	if (!(align & 7))
 		copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
 	if (!(align & 3))
@@ -70,6 +73,7 @@  long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
 	if (!(align & 1))
 		copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
 	copy_to_kernel_nofault_loop(dst, src, size, u8, Efault);
+	instrument_memcpy_after(dst, src, size, 0);
 	pagefault_enable();
 	return 0;
 Efault: