
[v2,02/14] mm/page_alloc: add helper for checking if check_pages_enabled

Message ID 20230321170513.2401534-3-rppt@kernel.org (mailing list archive)
State Handled Elsewhere
Series mm: move core MM initialization to mm/mm_init.c

Commit Message

Mike Rapoport March 21, 2023, 5:05 p.m. UTC
From: "Mike Rapoport (IBM)" <rppt@kernel.org>

Instead of duplicating the long static_branch_unlikely(&check_pages_enabled)
expression, wrap it in a helper function is_check_pages_enabled().

Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
---
 mm/page_alloc.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
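
For context, a minimal userspace sketch of the pattern this commit applies:
a repeated enablement test is hidden behind one small static inline helper so
the call sites stay short. This sketch is not part of the patch; the kernel's
DEFINE_STATIC_KEY_MAYBE()/static_branch_unlikely() machinery is stood in for
by a plain bool so the example compiles on its own, and the names
page_checks_enabled, is_page_checks_enabled() and page_is_bad() are invented
here rather than taken from mm/page_alloc.c.

#include <stdbool.h>
#include <stdio.h>

/* stand-in for the check_pages_enabled static key (plain bool here) */
static bool page_checks_enabled;

/* the pattern from the patch: one helper instead of repeating the test */
static inline bool is_page_checks_enabled(void)
{
	return page_checks_enabled;
}

/* hypothetical per-page sanity check, in the role of free_page_is_bad() */
static bool page_is_bad(int pfn)
{
	return pfn < 0;
}

int main(void)
{
	page_checks_enabled = true;	/* e.g. flipped by a debug knob */

	for (int pfn = -2; pfn < 3; pfn++) {
		if (is_page_checks_enabled() && page_is_bad(pfn))
			printf("bad page %d\n", pfn);
	}
	return 0;
}

In the real code the helper still compiles down to a patched jump via the
static key, so the wrapper costs nothing while keeping the call sites in
free_pages_prepare() and check_new_pages() uniform.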

Comments

Vlastimil Babka March 22, 2023, 11:38 a.m. UTC | #1
On 3/21/23 18:05, Mike Rapoport wrote:
> From: "Mike Rapoport (IBM)" <rppt@kernel.org>
> 
> Instead of duplicating the long static_branch_unlikely(&check_pages_enabled)
> expression, wrap it in a helper function is_check_pages_enabled().
> 
> Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
> Reviewed-by: David Hildenbrand <david@redhat.com>

Reviewed-by: Vlastimil Babka <vbabka@suse.cz>


Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 87d760236dba..e1149d54d738 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -245,6 +245,11 @@ EXPORT_SYMBOL(init_on_free);
 /* perform sanity checks on struct pages being allocated or freed */
 static DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
 
+static inline bool is_check_pages_enabled(void)
+{
+	return static_branch_unlikely(&check_pages_enabled);
+}
+
 static bool _init_on_alloc_enabled_early __read_mostly
 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
 static int __init early_init_on_alloc(char *buf)
@@ -1443,7 +1448,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		for (i = 1; i < (1 << order); i++) {
 			if (compound)
 				bad += free_tail_pages_check(page, page + i);
-			if (static_branch_unlikely(&check_pages_enabled)) {
+			if (is_check_pages_enabled()) {
 				if (unlikely(free_page_is_bad(page + i))) {
 					bad++;
 					continue;
@@ -1456,7 +1461,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		page->mapping = NULL;
 	if (memcg_kmem_online() && PageMemcgKmem(page))
 		__memcg_kmem_uncharge_page(page, order);
-	if (static_branch_unlikely(&check_pages_enabled)) {
+	if (is_check_pages_enabled()) {
 		if (free_page_is_bad(page))
 			bad++;
 		if (bad)
@@ -2366,7 +2371,7 @@ static int check_new_page(struct page *page)
 
 static inline bool check_new_pages(struct page *page, unsigned int order)
 {
-	if (static_branch_unlikely(&check_pages_enabled)) {
+	if (is_check_pages_enabled()) {
 		for (int i = 0; i < (1 << order); i++) {
 			struct page *p = page + i;