@@ -36,30 +36,28 @@
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>

-#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
-
-#define STACK_ALLOC_NULL_PROTECTION_BITS 1
-#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
-#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
-#define STACK_ALLOC_ALIGN 4
-#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
- STACK_ALLOC_ALIGN)
-#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
- STACK_ALLOC_NULL_PROTECTION_BITS - \
- STACK_ALLOC_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
-#define STACK_ALLOC_SLABS_CAP 8192
-#define STACK_ALLOC_MAX_SLABS \
- (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
- (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
+#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
+
+#define DEPOT_VALID_BITS 1
+#define DEPOT_SLAB_ORDER 2 /* Slab size order, 4 pages */
+#define DEPOT_SLAB_SIZE (1LL << (PAGE_SHIFT + DEPOT_SLAB_ORDER))
+#define DEPOT_STACK_ALIGN 4
+#define DEPOT_OFFSET_BITS (DEPOT_SLAB_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
+#define DEPOT_SLAB_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \
+ DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
+#define DEPOT_SLABS_CAP 8192
+#define DEPOT_MAX_SLABS \
+ (((1LL << (DEPOT_SLAB_INDEX_BITS)) < DEPOT_SLABS_CAP) ? \
+ (1LL << (DEPOT_SLAB_INDEX_BITS)) : DEPOT_SLABS_CAP)

/* The compact structure to store the reference to stacks. */
union handle_parts {
depot_stack_handle_t handle;
struct {
- u32 slab_index : STACK_ALLOC_INDEX_BITS;
- u32 offset : STACK_ALLOC_OFFSET_BITS;
- u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
- u32 extra : STACK_DEPOT_EXTRA_BITS;
+ u32 slab_index : DEPOT_SLAB_INDEX_BITS;
+ u32 offset : DEPOT_OFFSET_BITS;
+ u32 valid : DEPOT_VALID_BITS;
+ u32 extra : STACK_DEPOT_EXTRA_BITS;
};
};
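
A quick sanity check of the new names' bit budget (with assumed values that are not part of the patch: PAGE_SHIFT == 12 and STACK_DEPOT_EXTRA_BITS == 5): DEPOT_OFFSET_BITS works out to 2 + 12 - 4 = 10 and DEPOT_SLAB_INDEX_BITS to 32 - 1 - 10 - 5 = 16, so 1LL << 16 exceeds DEPOT_SLABS_CAP and DEPOT_MAX_SLABS resolves to 8192. A standalone userspace sketch:

/*
 * Standalone sketch (not kernel code): verify the handle bit budget
 * under the assumed values PAGE_SHIFT == 12, STACK_DEPOT_EXTRA_BITS == 5.
 */
#include <assert.h>

#define PAGE_SHIFT		12	/* assumption */
#define STACK_DEPOT_EXTRA_BITS	5	/* assumption */
#define DEPOT_HANDLE_BITS	32	/* depot_stack_handle_t is a u32 */
#define DEPOT_VALID_BITS	1
#define DEPOT_SLAB_ORDER	2
#define DEPOT_STACK_ALIGN	4
#define DEPOT_OFFSET_BITS	(DEPOT_SLAB_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
#define DEPOT_SLAB_INDEX_BITS	(DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \
				 DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)

static_assert(DEPOT_OFFSET_BITS == 10, "offset field is 10 bits");
static_assert(DEPOT_SLAB_INDEX_BITS == 16, "slab index field is 16 bits");
static_assert(DEPOT_SLAB_INDEX_BITS + DEPOT_OFFSET_BITS + DEPOT_VALID_BITS +
	      STACK_DEPOT_EXTRA_BITS == DEPOT_HANDLE_BITS,
	      "the four fields fill the 32-bit handle exactly");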
@@ -91,7 +89,7 @@ static unsigned int stack_bucket_number_order;
static unsigned int stack_hash_mask;

/* Array of memory regions that store stack traces. */
-static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
+static void *stack_slabs[DEPOT_MAX_SLABS];
/* Currently used slab in stack_slabs. */
static int slab_index;
/* Offset to the unused space in the currently used slab. */
@@ -235,7 +233,7 @@ static bool depot_init_slab(void **prealloc)
*prealloc = NULL;
} else {
/* If this is the last depot slab, do not touch the next one. */
- if (slab_index + 1 < STACK_ALLOC_MAX_SLABS) {
+ if (slab_index + 1 < DEPOT_MAX_SLABS) {
stack_slabs[slab_index + 1] = *prealloc;
*prealloc = NULL;
/*
@@ -256,10 +254,10 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
struct stack_record *stack;
size_t required_size = struct_size(stack, entries, size);

- required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
+ required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN);

- if (unlikely(slab_offset + required_size > STACK_ALLOC_SIZE)) {
- if (unlikely(slab_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
+ if (unlikely(slab_offset + required_size > DEPOT_SLAB_SIZE)) {
+ if (unlikely(slab_index + 1 >= DEPOT_MAX_SLABS)) {
WARN_ONCE(1, "Stack depot reached limit capacity");
return NULL;
}
@@ -270,7 +268,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
* |next_slab_inited| in stack_depot_save() and
* depot_init_slab().
*/
- if (slab_index + 1 < STACK_ALLOC_MAX_SLABS)
+ if (slab_index + 1 < DEPOT_MAX_SLABS)
smp_store_release(&next_slab_inited, 0);
}
depot_init_slab(prealloc);
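
The smp_store_release() above pairs with an smp_load_acquire() of next_slab_inited in stack_depot_save(), as the comment notes. Reduced to portable C11 atomics, the pairing looks roughly like this (a standalone sketch; the helper names are hypothetical):

/* Standalone C11 sketch of the release/acquire pairing; not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int next_slab_inited = 1;

/* Writer: clear the flag; the release ordering makes all earlier writes
 * (the slab bookkeeping) visible to any reader that observes the 0. */
static void depot_clear_next_inited(void)
{
	atomic_store_explicit(&next_slab_inited, 0, memory_order_release);
}

/* Reader: the acquire load orders subsequent reads after this one,
 * mirroring what smp_load_acquire() provides in the kernel. */
static bool depot_next_inited(void)
{
	return atomic_load_explicit(&next_slab_inited, memory_order_acquire);
}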
@@ -282,7 +280,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
stack->hash = hash;
stack->size = size;
stack->handle.slab_index = slab_index;
- stack->handle.offset = slab_offset >> STACK_ALLOC_ALIGN;
+ stack->handle.offset = slab_offset >> DEPOT_STACK_ALIGN;
stack->handle.valid = 1;
stack->handle.extra = 0;
memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
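
The right shift on slab_offset is lossless: depot_alloc_stack() aligns every record to 1 << DEPOT_STACK_ALIGN (16) bytes via the ALIGN() call above, so the low four bits of slab_offset are always zero, and stack_depot_fetch() restores them with the inverse shift. A standalone round-trip sketch (helper names are hypothetical):

/* Standalone sketch of the lossless offset round-trip; not kernel code. */
#include <assert.h>
#include <stddef.h>

#define DEPOT_STACK_ALIGN 4

static unsigned int depot_encode_offset(size_t slab_offset)
{
	/* slab_offset is a multiple of 16, so no bits are dropped. */
	return (unsigned int)(slab_offset >> DEPOT_STACK_ALIGN);
}

static size_t depot_decode_offset(unsigned int offset_field)
{
	/* Inverse of the encode step, as in stack_depot_fetch(). */
	return (size_t)offset_field << DEPOT_STACK_ALIGN;
}

int main(void)
{
	size_t off = 3 * 16;	/* any 16-byte-aligned offset round-trips */

	assert(depot_decode_offset(depot_encode_offset(off)) == off);
	return 0;
}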
@@ -413,7 +411,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
alloc_flags &= ~GFP_ZONEMASK;
alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
alloc_flags |= __GFP_NOWARN;
- page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
+ page = alloc_pages(alloc_flags, DEPOT_SLAB_ORDER);
if (page)
prealloc = page_address(page);
}
@@ -445,7 +443,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
exit:
if (prealloc) {
/* Nobody used this memory, ok to free it. */
- free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
+ free_pages((unsigned long)prealloc, DEPOT_SLAB_ORDER);
}
if (found)
retval.handle = found->handle.handle;
@@ -490,7 +488,7 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
{
union handle_parts parts = { .handle = handle };
void *slab;
- size_t offset = parts.offset << STACK_ALLOC_ALIGN;
+ size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack;

*entries = NULL;
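
Taken together, the renamed constants bound the depot at DEPOT_MAX_SLABS slabs of DEPOT_SLAB_SIZE bytes each; under the assumptions above (4 KiB pages, order-2 slabs, the 8192-slab cap) that is a 128 MiB ceiling. A short sketch of the arithmetic:

/* Standalone sketch: total depot capacity under an assumed typical config. */
#include <stdio.h>

int main(void)
{
	long long page_size = 1LL << 12;	/* assumes PAGE_SHIFT == 12 */
	long long slab_size = page_size << 2;	/* DEPOT_SLAB_ORDER == 2: 16 KiB */
	long long max_slabs = 8192;		/* the cap wins over 1LL << 16 */

	/* 16 KiB * 8192 slabs = 128 MiB of stack storage at most. */
	printf("max depot capacity: %lld MiB\n",
	       (slab_size * max_slabs) >> 20);
	return 0;
}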