
[v2] mm/mempool: Add 'else' to split mutually exclusive case

Message ID 20200924111641.28922-1-linmiaohe@huawei.com (mailing list archive)
State New, archived
Series: [v2] mm/mempool: Add 'else' to split mutually exclusive case

Commit Message

Miaohe Lin Sept. 24, 2020, 11:16 a.m. UTC
Add 'else' to split the mutually exclusive cases and avoid some unnecessary
checks. It doesn't seem to change code generation (the compiler is smart),
but I think it helps readability.

Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
---
 mm/mempool.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)
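
For context, a minimal standalone sketch (not from the kernel tree) of the
pattern the patch changes: the two backend tests can never both be true for
the same pool, so chaining them with "else if" lets the second comparison be
skipped once the first has matched.  backend_a()/backend_b() below are
made-up stand-ins for the pool->alloc/pool->free pointer comparisons.

/* Illustrative userspace sketch only, not kernel code. */
#include <stdio.h>
#include <stdbool.h>

static bool backend_a(int kind) { return kind == 0; }
static bool backend_b(int kind) { return kind == 1; }

static void handle_before(int kind)
{
	if (backend_a(kind))
		puts("A");
	/* This test still runs even when backend_a() already matched. */
	if (backend_b(kind))
		puts("B");
}

static void handle_after(int kind)
{
	if (backend_a(kind)) {
		puts("A");
	} else if (backend_b(kind)) {
		/* Skipped entirely once the first branch was taken. */
		puts("B");
	}
}

int main(void)
{
	handle_before(0);	/* prints "A", still evaluates backend_b() */
	handle_after(0);	/* prints "A", backend_b() never evaluated */
	return 0;
}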

Comments

Andrew Morton Sept. 25, 2020, 2:40 a.m. UTC | #1
On Thu, 24 Sep 2020 07:16:41 -0400 Miaohe Lin <linmiaohe@huawei.com> wrote:

> Add 'else' to split the mutually exclusive cases and avoid some unnecessary
> checks. It doesn't seem to change code generation (the compiler is smart),
> but I think it helps readability.
> 
> ...
>
> --- a/mm/mempool.c
> +++ b/mm/mempool.c
> @@ -58,11 +58,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
>  static void check_element(mempool_t *pool, void *element)
>  {
>  	/* Mempools backed by slab allocator */
> -	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
> +	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
>  		__check_element(pool, element, ksize(element));
> -
>  	/* Mempools backed by page allocator */
> -	if (pool->free == mempool_free_pages) {
> +	} else if (pool->free == mempool_free_pages) {
>  		int order = (int)(long)pool->pool_data;
>  		void *addr = kmap_atomic((struct page *)element);
>  
> @@ -82,11 +81,10 @@ static void __poison_element(void *element, size_t size)
>  static void poison_element(mempool_t *pool, void *element)
>  {
>  	/* Mempools backed by slab allocator */
> -	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
> +	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
>  		__poison_element(element, ksize(element));
> -
>  	/* Mempools backed by page allocator */
> -	if (pool->alloc == mempool_alloc_pages) {
> +	} else if (pool->alloc == mempool_alloc_pages) {
>  		int order = (int)(long)pool->pool_data;
>  		void *addr = kmap_atomic((struct page *)element);
>  

OK, I guess.  But the comments are now in the wrong place.

--- a/mm/mempool.c~mm-mempool-add-else-to-split-mutually-exclusive-case-fix
+++ a/mm/mempool.c
@@ -60,8 +60,8 @@ static void check_element(mempool_t *poo
 	/* Mempools backed by slab allocator */
 	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
 		__check_element(pool, element, ksize(element));
-	/* Mempools backed by page allocator */
 	} else if (pool->free == mempool_free_pages) {
+		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
 		void *addr = kmap_atomic((struct page *)element);
 
@@ -83,8 +83,8 @@ static void poison_element(mempool_t *po
 	/* Mempools backed by slab allocator */
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
 		__poison_element(element, ksize(element));
-	/* Mempools backed by page allocator */
 	} else if (pool->alloc == mempool_alloc_pages) {
+		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
 		void *addr = kmap_atomic((struct page *)element);

Patch

diff --git a/mm/mempool.c b/mm/mempool.c
index 79bff63ecf27..95fa8e601730 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -58,11 +58,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
 static void check_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
-	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
+	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
 		__check_element(pool, element, ksize(element));
-
 	/* Mempools backed by page allocator */
-	if (pool->free == mempool_free_pages) {
+	} else if (pool->free == mempool_free_pages) {
 		int order = (int)(long)pool->pool_data;
 		void *addr = kmap_atomic((struct page *)element);
 
@@ -82,11 +81,10 @@ static void __poison_element(void *element, size_t size)
 static void poison_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
-	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
 		__poison_element(element, ksize(element));
-
 	/* Mempools backed by page allocator */
-	if (pool->alloc == mempool_alloc_pages) {
+	} else if (pool->alloc == mempool_alloc_pages) {
 		int order = (int)(long)pool->pool_data;
 		void *addr = kmap_atomic((struct page *)element);
 
@@ -107,7 +105,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 		kasan_poison_kfree(element, _RET_IP_);
-	if (pool->alloc == mempool_alloc_pages)
+	else if (pool->alloc == mempool_alloc_pages)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
 
@@ -115,7 +113,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 		kasan_unpoison_slab(element);
-	if (pool->alloc == mempool_alloc_pages)
+	else if (pool->alloc == mempool_alloc_pages)
 		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }
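
For readers less familiar with mempool internals, here is a sketch (not part
of this patch; the "foo" names are illustrative) of how the two pool flavours
distinguished above are created.  It shows why pool_data is interpreted
differently per branch: a slab-backed pool stores its kmem_cache there, while
a page-backed pool stores the allocation order.

/* Illustrative sketch, assuming a module init context; error
 * unwinding is elided for brevity. */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *foo_cache;
static mempool_t *foo_slab_pool;
static mempool_t *foo_page_pool;

static int __init foo_pools_init(void)
{
	foo_cache = kmem_cache_create("foo", 128, 0, 0, NULL);
	if (!foo_cache)
		return -ENOMEM;

	/* pool->alloc == mempool_alloc_slab and pool_data is the
	 * kmem_cache, so check_element()/poison_element() take the
	 * ksize() branch. */
	foo_slab_pool = mempool_create_slab_pool(4, foo_cache);

	/* pool->alloc == mempool_alloc_pages and pool_data is the page
	 * order (0 == single pages), hence the (int)(long) cast in the
	 * page-allocator branch. */
	foo_page_pool = mempool_create_page_pool(4, 0);

	if (!foo_slab_pool || !foo_page_pool)
		return -ENOMEM;
	return 0;
}

Since a pool is created with exactly one alloc/free pair, the branch
conditions really are mutually exclusive, which is what makes the else-if
conversion safe.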