@@ -96,8 +96,9 @@ static inline void fault_config_init(struct fault_config *config,
struct kmem_cache;
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
-
+#endif
#ifdef CONFIG_FAIL_PAGE_ALLOC
bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
#else
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/fault-inject.h>
#include <linux/mm.h>
+#include "internal.h"
static struct {
struct fault_attr attr;
@@ -9,7 +10,7 @@ static struct {
bool ignore_gfp_reclaim;
u32 min_order;
} fail_page_alloc = {
- .attr = FAULT_ATTR_INITIALIZER,
+ .attr = FAULT_ATTR_INITIALIZER_KEY(&should_fail_alloc_page_active.key),
.ignore_gfp_reclaim = true,
.ignore_gfp_highmem = true,
.min_order = 1,
@@ -410,6 +410,8 @@ extern char * const zone_names[MAX_NR_ZONES];
/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
+DECLARE_STATIC_KEY_FALSE(should_fail_alloc_page_active);
+
extern int min_free_kbytes;
void setup_per_zone_wmarks(void);
@@ -3008,11 +3008,35 @@ struct page *rmqueue(struct zone *preferred_zone,
return page;
}
-noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+#if defined(CONFIG_FUNCTION_ERROR_INJECTION) || defined(CONFIG_FAIL_PAGE_ALLOC)
+DEFINE_STATIC_KEY_FALSE(should_fail_alloc_page_active);
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+noinline
+#else
+static inline
+#endif
+bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
return __should_fail_alloc_page(gfp_mask, order);
}
-ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
+ALLOW_ERROR_INJECTION_KEY(should_fail_alloc_page, TRUE, &should_fail_alloc_page_active);
+
+static __always_inline bool
+should_fail_alloc_page_wrapped(gfp_t gfp_mask, unsigned int order)
+{
+ if (static_branch_unlikely(&should_fail_alloc_page_active))
+ return should_fail_alloc_page(gfp_mask, order);
+
+ return false;
+}
+#else
+static __always_inline bool
+should_fail_alloc_page_wrapped(gfp_t gfp_mask, unsigned int order)
+{
+ return false;
+}
+#endif
static inline long __zone_watermark_unusable_free(struct zone *z,
unsigned int order, unsigned int alloc_flags)
@@ -4430,7 +4454,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
might_alloc(gfp_mask);
- if (should_fail_alloc_page(gfp_mask, order))
+ if (should_fail_alloc_page_wrapped(gfp_mask, order))
return false;
*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);