
[3/4] mm/usercopy: Detect large folio overruns

Message ID 20220110231530.665970-4-willy@infradead.org (mailing list archive)
State Mainlined
Commit ab502103ae3ce4c0fc393e598455efede3e523c9
Series Assorted improvements to usercopy

Commit Message

Matthew Wilcox Jan. 10, 2022, 11:15 p.m. UTC
Move the compound page overrun detection out of
CONFIG_HARDENED_USERCOPY_PAGESPAN and convert it to use folios so it's
enabled for more people.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Kees Cook <keescook@chromium.org>
---
 mm/usercopy.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
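
For context, here is a minimal userspace sketch (not kernel code) of the arithmetic the new check performs: an object being copied to or from user space must lie entirely within its backing large folio. The helper name, addresses, and FOLIO_SIZE below are illustrative stand-ins for folio_address()/folio_size(), not kernel API.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for the size of a large (e.g. 16KiB) folio. */
#define FOLIO_SIZE (4 * 4096UL)

/* Mirrors the patch's test: offset + n > folio_size(folio). */
static bool copy_overruns_folio(uintptr_t folio_start, uintptr_t ptr,
				unsigned long n)
{
	unsigned long offset = ptr - folio_start;	/* ptr - folio_address(folio) */

	return offset + n > FOLIO_SIZE;
}

int main(void)
{
	uintptr_t folio_start = 0x100000;

	/* 512 bytes starting 1KiB in: fits entirely, no abort. */
	printf("in bounds: %s\n",
	       copy_overruns_folio(folio_start, folio_start + 1024, 512) ?
	       "abort" : "ok");

	/* 8KiB starting 12KiB in: runs 4KiB past the 16KiB folio, would abort. */
	printf("overrun:   %s\n",
	       copy_overruns_folio(folio_start, folio_start + 12288, 8192) ?
	       "abort" : "ok");

	return 0;
}

In the kernel patch itself, a failing check calls usercopy_abort() rather than returning a boolean; the sketch only demonstrates why "offset + n > folio_size" catches copies that overrun the folio.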

Comments

David Hildenbrand Jan. 31, 2022, 2:28 p.m. UTC | #1
On 11.01.22 00:15, Matthew Wilcox (Oracle) wrote:
> Move the compound page overrun detection out of
> CONFIG_HARDENED_USERCOPY_PAGESPAN and convert it to use folios so it's
> enabled for more people.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Acked-by: Kees Cook <keescook@chromium.org>
> ---
>  mm/usercopy.c | 10 ++++------
>  1 file changed, 4 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/usercopy.c b/mm/usercopy.c
> index dcf71b7e3098..e1cb98087a05 100644
> --- a/mm/usercopy.c
> +++ b/mm/usercopy.c
> @@ -164,7 +164,6 @@ static inline void check_page_span(const void *ptr, unsigned long n,
>  {
>  #ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
>  	const void *end = ptr + n - 1;
> -	struct page *endpage;
>  	bool is_reserved, is_cma;
>  
>  	/*
> @@ -195,11 +194,6 @@ static inline void check_page_span(const void *ptr, unsigned long n,
>  		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
>  		return;
>  
> -	/* Allow if fully inside the same compound (__GFP_COMP) page. */
> -	endpage = virt_to_head_page(end);
> -	if (likely(endpage == page))
> -		return;
> -
>  	/*
>  	 * Reject if range is entirely either Reserved (i.e. special or
>  	 * device memory), or CMA. Otherwise, reject since the object spans
> @@ -259,6 +253,10 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
>  	if (folio_test_slab(folio)) {
>  		/* Check slab allocator for flags and size. */
>  		__check_heap_object(ptr, n, folio_slab(folio), to_user);
> +	} else if (folio_test_large(folio)) {
> +		unsigned long offset = ptr - folio_address(folio);

Nit: I'd have added an empty line.

> +		if (offset + n > folio_size(folio))
> +			usercopy_abort("page alloc", NULL, to_user, offset, n);
>  	} else {
>  		/* Verify object does not incorrectly span multiple pages. */
>  		check_page_span(ptr, n, folio_page(folio, 0), to_user);

LGTM

Reviewed-by: David Hildenbrand <david@redhat.com>
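
On the nit above: kernel coding style prefers a blank line between a local variable declaration and the first statement, so the suggested form of the new branch would read roughly as follows (same logic as the hunk quoted above, only the blank line added; shown as a sketch, not necessarily what was merged):

	} else if (folio_test_large(folio)) {
		unsigned long offset = ptr - folio_address(folio);

		if (offset + n > folio_size(folio))
			usercopy_abort("page alloc", NULL, to_user, offset, n);
	} else {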

Patch

diff --git a/mm/usercopy.c b/mm/usercopy.c
index dcf71b7e3098..e1cb98087a05 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -164,7 +164,6 @@ static inline void check_page_span(const void *ptr, unsigned long n,
 {
 #ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
-	struct page *endpage;
 	bool is_reserved, is_cma;
 
 	/*
@@ -195,11 +194,6 @@ static inline void check_page_span(const void *ptr, unsigned long n,
 		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
 		return;
 
-	/* Allow if fully inside the same compound (__GFP_COMP) page. */
-	endpage = virt_to_head_page(end);
-	if (likely(endpage == page))
-		return;
-
 	/*
 	 * Reject if range is entirely either Reserved (i.e. special or
 	 * device memory), or CMA. Otherwise, reject since the object spans
@@ -259,6 +253,10 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 	if (folio_test_slab(folio)) {
 		/* Check slab allocator for flags and size. */
 		__check_heap_object(ptr, n, folio_slab(folio), to_user);
+	} else if (folio_test_large(folio)) {
+		unsigned long offset = ptr - folio_address(folio);
+		if (offset + n > folio_size(folio))
+			usercopy_abort("page alloc", NULL, to_user, offset, n);
 	} else {
 		/* Verify object does not incorrectly span multiple pages. */
 		check_page_span(ptr, n, folio_page(folio, 0), to_user);