[3/3] usercopy: Make usercopy resilient against ridiculously large copies

Message ID: 20220612213227.3881769-4-willy@infradead.org (mailing list archive)
State: New, archived
Series: Fixes for usercopy

Commit Message

Matthew Wilcox June 12, 2022, 9:32 p.m. UTC
If 'n' is so large that it's negative when treated as a signed value, the
'addr + n' arithmetic can wrap around, and we would mistakenly think that
the copy is OK when it's not.  Such a copy would probably crash, but just
doing the arithmetic in a simpler way lets us detect and refuse this case.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/usercopy.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)
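
An aside, not part of the patch: the fix works because a check of the form
'addr + n > end' can wrap around for a huge 'n', while the rearranged form
'n > end - addr' cannot, as long as 'addr' is known to lie within the object.
A minimal userspace sketch with hypothetical values (not kernel code):

/* Standalone demo of the two forms of the bounds check. */
#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x1100;	/* hypothetical copy start */
	unsigned long end  = 0x2000;	/* hypothetical object end */
	unsigned long n    = -1UL;	/* "ridiculously large" size */

	/* Old form: addr + n wraps to addr - 1, so the check passes. */
	if (addr + n > end)
		puts("old form: caught");
	else
		puts("old form: missed the overlong copy");

	/* New form: end - addr is at most the object size, so any
	 * huge n is refused. */
	if (n > end - addr)
		puts("new form: caught");
	else
		puts("new form: missed");

	return 0;
}

Compiled and run, the old form prints "missed the overlong copy" while the
new form refuses the copy.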

Comments

Uladzislau Rezki June 13, 2022, 9:57 a.m. UTC | #1
> [ quoted patch snipped; identical to the full patch shown below ]
Looks good to me:

Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>

--
Uladzislau Rezki

Patch

diff --git a/mm/usercopy.c b/mm/usercopy.c
index 31deee7dd2f5..ff16083cf1c8 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -162,20 +162,18 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 				     bool to_user)
 {
 	uintptr_t addr = (uintptr_t)ptr;
+	unsigned long offset;
 	struct folio *folio;
 
 	if (is_kmap_addr(ptr)) {
-		unsigned long page_end = addr | (PAGE_SIZE - 1);
-
-		if (addr + n - 1 > page_end)
-			usercopy_abort("kmap", NULL, to_user,
-					offset_in_page(ptr), n);
+		offset = offset_in_page(ptr);
+		if (n > PAGE_SIZE - offset)
+			usercopy_abort("kmap", NULL, to_user, offset, n);
 		return;
 	}
 
 	if (is_vmalloc_addr(ptr)) {
 		struct vmap_area *area = find_vmap_area(addr);
-		unsigned long offset;
 
 		if (!area) {
 			usercopy_abort("vmalloc", "no area", to_user, 0, n);
@@ -184,9 +182,10 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 
 		/* XXX: We should also abort for free vmap_areas */
 
-		offset = addr - area->va_start;
-		if (addr + n > area->va_end)
+		if (n > area->va_end - addr) {
+			offset = addr - area->va_start;
 			usercopy_abort("vmalloc", NULL, to_user, offset, n);
+		}
 		return;
 	}
 
@@ -199,8 +198,8 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 		/* Check slab allocator for flags and size. */
 		__check_heap_object(ptr, n, folio_slab(folio), to_user);
 	} else if (folio_test_large(folio)) {
-		unsigned long offset = ptr - folio_address(folio);
-		if (offset + n > folio_size(folio))
+		offset = ptr - folio_address(folio);
+		if (n > folio_size(folio) - offset)
 			usercopy_abort("page alloc", NULL, to_user, offset, n);
 	}
 }
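
Worth noting: the rearranged subtractions cannot themselves underflow,
because each branch has already established that 'addr' lies within the
object. offset_in_page(ptr) is always less than PAGE_SIZE;
find_vmap_area() only returns an area satisfying va_start <= addr < va_end;
and a pointer resolved to a folio lies within that folio. The right-hand
side of each comparison is therefore at most the size of the object, so a
huge 'n' always trips the check.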