[v2,3/4] mm: kmsan: apply __must_check to non-void functions

Message ID: 20230413131223.4135168-3-glider@google.com (mailing list archive)
State: New
Series: [v2,1/4] mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush()

Commit Message

Alexander Potapenko April 13, 2023, 1:12 p.m. UTC
Non-void KMSAN hooks may return error codes that indicate that KMSAN
failed to reflect the changed memory state in the metadata (e.g. it
could not create the necessary memory mappings). In such cases the
callers should handle the errors to prevent the tool from using the
inconsistent metadata in the future.

We mark non-void hooks with __must_check so that error handling is not
skipped.

Signed-off-by: Alexander Potapenko <glider@google.com>
---
 include/linux/kmsan.h | 43 ++++++++++++++++++++++---------------------
 1 file changed, 22 insertions(+), 21 deletions(-)
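
To illustrate the caller-side pattern this enforces, here is a minimal
sketch in the style of vmap_pages_range_noflush() in mm/vmalloc.c (the
helper name below is hypothetical, and the exact caller code is an
assumption based on patch 1/4 of this series):

/*
 * Hypothetical caller sketch, not part of this patch. Once the hook
 * is marked __must_check, silently dropping its return value yields
 * a -Wunused-result warning, so the metadata failure must propagate.
 */
static int map_with_metadata(unsigned long addr, unsigned long end,
			     pgprot_t prot, struct page **pages,
			     unsigned int page_shift)
{
	int err = __vmap_pages_range_noflush(addr, end, prot, pages,
					     page_shift);

	if (err)
		return err;

	/* The pages are mapped; now map the KMSAN shadow/origin metadata. */
	return kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
					      page_shift);
}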

Comments

Marco Elver April 18, 2023, 10:10 a.m. UTC | #1
On Thu, 13 Apr 2023 at 15:12, 'Alexander Potapenko' via kasan-dev
<kasan-dev@googlegroups.com> wrote:
>
> Non-void KMSAN hooks may return error codes that indicate that KMSAN
> failed to reflect the changed memory state in the metadata (e.g. it
> could not create the necessary memory mappings). In such cases the
> callers should handle the errors to prevent the tool from using the
> inconsistent metadata in the future.
>
> We mark non-void hooks with __must_check so that error handling is not
> skipped.
>
> Signed-off-by: Alexander Potapenko <glider@google.com>

Reviewed-by: Marco Elver <elver@google.com>

Patch

diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
index 30b17647ce3c7..e0c23a32cdf01 100644
--- a/include/linux/kmsan.h
+++ b/include/linux/kmsan.h
@@ -54,7 +54,8 @@ void __init kmsan_init_runtime(void);
  * Freed pages are either returned to buddy allocator or held back to be used
  * as metadata pages.
  */
-bool __init kmsan_memblock_free_pages(struct page *page, unsigned int order);
+bool __init __must_check kmsan_memblock_free_pages(struct page *page,
+						   unsigned int order);
 
 /**
  * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
@@ -137,9 +138,11 @@ void kmsan_kfree_large(const void *ptr);
  * vmalloc metadata address range. Returns 0 on success, callers must check
  * for non-zero return value.
  */
-int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-				   pgprot_t prot, struct page **pages,
-				   unsigned int page_shift);
+int __must_check kmsan_vmap_pages_range_noflush(unsigned long start,
+						unsigned long end,
+						pgprot_t prot,
+						struct page **pages,
+						unsigned int page_shift);
 
 /**
  * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
@@ -163,9 +166,9 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
  * virtual memory. Returns 0 on success, callers must check for non-zero return
  * value.
  */
-int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
-			     phys_addr_t phys_addr, pgprot_t prot,
-			     unsigned int page_shift);
+int __must_check kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+					  phys_addr_t phys_addr, pgprot_t prot,
+					  unsigned int page_shift);
 
 /**
  * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
@@ -237,8 +240,8 @@ static inline void kmsan_init_runtime(void)
 {
 }
 
-static inline bool kmsan_memblock_free_pages(struct page *page,
-					     unsigned int order)
+static inline bool __must_check kmsan_memblock_free_pages(struct page *page,
+							  unsigned int order)
 {
 	return true;
 }
@@ -251,10 +254,9 @@ static inline void kmsan_task_exit(struct task_struct *task)
 {
 }
 
-static inline int kmsan_alloc_page(struct page *page, unsigned int order,
-				   gfp_t flags)
+static inline void kmsan_alloc_page(struct page *page, unsigned int order,
+				    gfp_t flags)
 {
-	return 0;
 }
 
 static inline void kmsan_free_page(struct page *page, unsigned int order)
@@ -283,11 +285,9 @@ static inline void kmsan_kfree_large(const void *ptr)
 {
 }
 
-static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
-						 unsigned long end,
-						 pgprot_t prot,
-						 struct page **pages,
-						 unsigned int page_shift)
+static inline int __must_check kmsan_vmap_pages_range_noflush(
+	unsigned long start, unsigned long end, pgprot_t prot,
+	struct page **pages, unsigned int page_shift)
 {
 	return 0;
 }
@@ -297,10 +297,11 @@ static inline void kmsan_vunmap_range_noflush(unsigned long start,
 {
 }
 
-static inline int kmsan_ioremap_page_range(unsigned long start,
-					   unsigned long end,
-					   phys_addr_t phys_addr, pgprot_t prot,
-					   unsigned int page_shift)
+static inline int __must_check kmsan_ioremap_page_range(unsigned long start,
+							unsigned long end,
+							phys_addr_t phys_addr,
+							pgprot_t prot,
+							unsigned int page_shift)
 {
 	return 0;
 }
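
For reference, __must_check is the kernel's wrapper for the compiler's
warn_unused_result attribute, so each annotation added above turns an
ignored error into a build-time diagnostic. A simplified sketch (the
real definition lives in include/linux/compiler_attributes.h; the
warning text below is approximate):

/* Simplified from include/linux/compiler_attributes.h: */
#define __must_check __attribute__((__warn_unused_result__))

/*
 * A caller that discards the result, e.g.
 *
 *	kmsan_ioremap_page_range(addr, end, phys_addr, prot, page_shift);
 *
 * now triggers a diagnostic along the lines of:
 *
 *	warning: ignoring return value of 'kmsan_ioremap_page_range',
 *	declared with attribute warn_unused_result [-Wunused-result]
 */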