[09/12] kasan: ensure poisoning size alignment

Message ID: fee7c8c751dbf871e957935c347fcf7f1ca49beb.1612208222.git.andreyknvl@google.com
State: New, archived
Series: kasan: optimizations and fixes for HW_TAGS

Commit Message

Andrey Konovalov Feb. 1, 2021, 7:43 p.m. UTC
A previous change, d99f6a10c161 ("kasan: don't round_up too much"),
attempted to simplify the code by adding a round_up(size) call to
kasan_poison(). While this leaves fewer round_up() calls in the source,
it results in round_up() being executed multiple times at runtime.

This patch removes the round_up() of size from kasan_poison() and
ensures that all callers round up the size explicitly. It also adds
WARN_ON() alignment checks for address and size to kasan_poison() and
kasan_unpoison().
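
For illustration, here is a minimal standalone sketch of the alignment
arithmetic involved (the granule size of 16 below is an assumption for
the example; the real KASAN_GRANULE_SIZE depends on the KASAN mode):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Standalone sketch with assumed values -- not kernel code. */
	#define GRANULE_SIZE	16	/* assumed granule size */
	#define GRANULE_MASK	((size_t)GRANULE_SIZE - 1)

	int main(void)
	{
		size_t size = 30;

		/* What callers now do before calling kasan_poison():
		 * round_up(30, 16) == 32.
		 */
		size_t aligned = (size + GRANULE_MASK) & ~GRANULE_MASK;

		/* What the new WARN_ON() checks catch: 30 & 15 == 14;
		 * a non-zero result means size is not granule-aligned.
		 */
		bool misaligned = size & GRANULE_MASK;

		printf("aligned=%zu misaligned=%d\n", aligned, misaligned);
		return 0;
	}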

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 mm/kasan/common.c |  9 ++++++---
 mm/kasan/kasan.h  | 33 ++++++++++++++++++++-------------
 mm/kasan/shadow.c | 37 ++++++++++++++++++++++---------------
 3 files changed, 48 insertions(+), 31 deletions(-)

Comments

Marco Elver Feb. 3, 2021, 3:31 p.m. UTC | #1
On Mon, Feb 01, 2021 at 08:43PM +0100, Andrey Konovalov wrote:
> A previous change, d99f6a10c161 ("kasan: don't round_up too much"),
> attempted to simplify the code by adding a round_up(size) call to
> kasan_poison(). While this leaves fewer round_up() calls in the source,
> it results in round_up() being executed multiple times at runtime.
> 
> This patch removes the round_up() of size from kasan_poison() and
> ensures that all callers round up the size explicitly. It also adds
> WARN_ON() alignment checks for address and size to kasan_poison() and
> kasan_unpoison().
> 
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>

Reviewed-by: Marco Elver <elver@google.com>

Patch

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index a51d6ea580b0..5691cca69397 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -261,7 +261,8 @@ void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
 
 void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
 {
-	kasan_poison(object, cache->object_size, KASAN_KMALLOC_REDZONE);
+	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
+			KASAN_KMALLOC_REDZONE);
 }
 
 /*
@@ -348,7 +349,8 @@ static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
 		return true;
 	}
 
-	kasan_poison(object, cache->object_size, KASAN_KMALLOC_FREE);
+	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
+			KASAN_KMALLOC_FREE);
 
 	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
 		return false;
@@ -490,7 +492,8 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
 	/* Poison the aligned part of the redzone. */
 	redzone_start = round_up((unsigned long)(object + size),
 				KASAN_GRANULE_SIZE);
-	redzone_end = (unsigned long)object + cache->object_size;
+	redzone_end = round_up((unsigned long)(object + cache->object_size),
+				KASAN_GRANULE_SIZE);
 	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
 			   KASAN_KMALLOC_REDZONE);
 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 6a2882997f23..2f7400a3412f 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -321,30 +321,37 @@ static inline u8 kasan_random_tag(void) { return 0; }
 
 #ifdef CONFIG_KASAN_HW_TAGS
 
-static inline void kasan_poison(const void *address, size_t size, u8 value)
+static inline void kasan_poison(const void *addr, size_t size, u8 value)
 {
-	address = kasan_reset_tag(address);
+	addr = kasan_reset_tag(addr);
 
 	/* Skip KFENCE memory if called explicitly outside of sl*b. */
-	if (is_kfence_address(address))
+	if (is_kfence_address(addr))
 		return;
 
-	hw_set_mem_tag_range((void *)address,
-			round_up(size, KASAN_GRANULE_SIZE), value);
+	if (WARN_ON((u64)addr & KASAN_GRANULE_MASK))
+		return;
+	if (WARN_ON(size & KASAN_GRANULE_MASK))
+		return;
+
+	hw_set_mem_tag_range((void *)addr, size, value);
 }
 
-static inline void kasan_unpoison(const void *address, size_t size)
+static inline void kasan_unpoison(const void *addr, size_t size)
 {
-	u8 tag = get_tag(address);
+	u8 tag = get_tag(addr);
 
-	address = kasan_reset_tag(address);
+	addr = kasan_reset_tag(addr);
 
 	/* Skip KFENCE memory if called explicitly outside of sl*b. */
-	if (is_kfence_address(address))
+	if (is_kfence_address(addr))
 		return;
 
-	hw_set_mem_tag_range((void *)address,
-			round_up(size, KASAN_GRANULE_SIZE), tag);
+	if (WARN_ON((u64)addr & KASAN_GRANULE_MASK))
+		return;
+	size = round_up(size, KASAN_GRANULE_SIZE);
+
+	hw_set_mem_tag_range((void *)addr, size, tag);
 }
 
 static inline bool kasan_byte_accessible(const void *addr)
@@ -361,7 +368,7 @@ static inline bool kasan_byte_accessible(const void *addr)
 /**
  * kasan_poison - mark the memory range as unaccessible
  * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
- * @size - range size
+ * @size - range size, must be aligned to KASAN_GRANULE_SIZE
  * @value - value that's written to metadata for the range
  *
  * The size gets aligned to KASAN_GRANULE_SIZE before marking the range.
@@ -371,7 +378,7 @@ void kasan_poison(const void *addr, size_t size, u8 value);
 /**
  * kasan_unpoison - mark the memory range as accessible
  * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
- * @size - range size
+ * @size - range size, can be unaligned
  *
  * For the tag-based modes, the @size gets aligned to KASAN_GRANULE_SIZE before
  * marking the range.
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 1ed7817e4ee6..c97f51c557ea 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -69,7 +69,7 @@ void *memcpy(void *dest, const void *src, size_t len)
 	return __memcpy(dest, src, len);
 }
 
-void kasan_poison(const void *address, size_t size, u8 value)
+void kasan_poison(const void *addr, size_t size, u8 value)
 {
 	void *shadow_start, *shadow_end;
 
@@ -78,55 +78,62 @@ void kasan_poison(const void *address, size_t size, u8 value)
 	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
 	 * addresses to this function.
 	 */
-	address = kasan_reset_tag(address);
+	addr = kasan_reset_tag(addr);
 
 	/* Skip KFENCE memory if called explicitly outside of sl*b. */
-	if (is_kfence_address(address))
+	if (is_kfence_address(addr))
 		return;
 
-	size = round_up(size, KASAN_GRANULE_SIZE);
-	shadow_start = kasan_mem_to_shadow(address);
-	shadow_end = kasan_mem_to_shadow(address + size);
+	if (WARN_ON((u64)addr & KASAN_GRANULE_MASK))
+		return;
+	if (WARN_ON(size & KASAN_GRANULE_MASK))
+		return;
+
+	shadow_start = kasan_mem_to_shadow(addr);
+	shadow_end = kasan_mem_to_shadow(addr + size);
 
 	__memset(shadow_start, value, shadow_end - shadow_start);
 }
 EXPORT_SYMBOL(kasan_poison);
 
 #ifdef CONFIG_KASAN_GENERIC
-void kasan_poison_last_granule(const void *address, size_t size)
+void kasan_poison_last_granule(const void *addr, size_t size)
 {
 	if (size & KASAN_GRANULE_MASK) {
-		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
+		u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
 		*shadow = size & KASAN_GRANULE_MASK;
 	}
 }
 #endif
 
-void kasan_unpoison(const void *address, size_t size)
+void kasan_unpoison(const void *addr, size_t size)
 {
-	u8 tag = get_tag(address);
+	u8 tag = get_tag(addr);
 
 	/*
 	 * Perform shadow offset calculation based on untagged address, as
 	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
 	 * addresses to this function.
 	 */
-	address = kasan_reset_tag(address);
+	addr = kasan_reset_tag(addr);
 
 	/*
 	 * Skip KFENCE memory if called explicitly outside of sl*b. Also note
 	 * that calls to ksize(), where size is not a multiple of machine-word
 	 * size, would otherwise poison the invalid portion of the word.
 	 */
-	if (is_kfence_address(address))
+	if (is_kfence_address(addr))
+		return;
+
+	if (WARN_ON((u64)addr & KASAN_GRANULE_MASK))
 		return;
 
-	/* Unpoison round_up(size, KASAN_GRANULE_SIZE) bytes. */
-	kasan_poison(address, size, tag);
+	/* Unpoison all granules that cover the object. */
+	kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag);
 
 	/* Partially poison the last granule for the generic mode. */
 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
-		kasan_poison_last_granule(address, size);
+		kasan_poison_last_granule(addr, size);
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
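
For reference, a hedged sketch of the calling convention this patch
establishes (object and object_size are placeholder names, not taken
from the patch; the calls mirror those in mm/kasan/common.c above):

	/* kasan_poison() now requires a granule-aligned size, so the
	 * caller rounds up explicitly; kasan_unpoison() still accepts
	 * an unaligned size and rounds it up internally.
	 */
	kasan_poison(object, round_up(object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_REDZONE);
	kasan_unpoison(object, object_size);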