@@ -547,6 +547,10 @@ struct kmem_cache_node {
#ifdef CONFIG_SLUB
unsigned long nr_partial;
struct list_head partial;
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
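+ /*
+  * Running counts of the free objects and of the total object
+  * capacity across the slabs on the partial list below.
+  */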
+ atomic_long_t partial_free_objs;
+ unsigned long partial_total_objs;
+#endif
#ifdef CONFIG_SLUB_DEBUG
atomic_long_t nr_slabs;
atomic_long_t total_objects;
@@ -1890,10 +1890,31 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
/*
* Management of partially allocated slabs.
*/
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
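+/*
+ * partial_free_objs can be updated without list_lock held (see
+ * __slab_free()), so it needs atomic updates.  partial_total_objs is
+ * only adjusted on paths that already serialize nr_partial updates.
+ */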
+static inline void
+__update_partial_free(struct kmem_cache_node *n, long delta)
+{
+ atomic_long_add(delta, &n->partial_free_objs);
+}
+
+static inline void
+__update_partial_total(struct kmem_cache_node *n, long delta)
+{
+ n->partial_total_objs += delta;
+}
+#else
+static inline void
+__update_partial_free(struct kmem_cache_node *n, long delta) { }
+
+static inline void
+__update_partial_total(struct kmem_cache_node *n, long delta) { }
+#endif
+
static inline void
__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
{
n->nr_partial++;
+ __update_partial_total(n, page->objects);
if (tail == DEACTIVATE_TO_TAIL)
list_add_tail(&page->slab_list, &n->partial);
else
@@ -1913,6 +1934,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
lockdep_assert_held(&n->list_lock);
list_del(&page->slab_list);
n->nr_partial--;
+ __update_partial_total(n, -page->objects);
}

/*
@@ -1957,6 +1979,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
return NULL;
remove_partial(n, page);
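+ /* The slab leaves the partial list; *objects is its free object count. */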
+ __update_partial_free(n, -*objects);
WARN_ON(!freelist);
return freelist;
}
@@ -2286,8 +2309,11 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
"unfreezing slab"))
goto redo;
- if (lock)
+ if (lock) {
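+ /*
+  * For M_PARTIAL the page has been put on n->partial earlier in this
+  * function; account its free objects while list_lock is still held.
+  */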
+ if (m == M_PARTIAL)
+ __update_partial_free(n, new.objects - new.inuse);
spin_unlock(&n->list_lock);
+ }
if (m == M_PARTIAL)
stat(s, tail);
@@ -2353,6 +2379,7 @@ static void unfreeze_partials(struct kmem_cache *s,
discard_page = page;
} else {
add_partial(n, page, DEACTIVATE_TO_TAIL);
+ __update_partial_free(n, new.objects - new.inuse);
stat(s, FREE_ADD_PARTIAL);
}
}
@@ -3039,6 +3066,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
head, new.counters,
"__slab_free"));
+ if (!was_frozen && prior) {
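+ /*
+  * Not frozen and had free objects before this free, so the slab is
+  * on the node's partial list.  When n is NULL the list_lock is not
+  * held, which is why partial_free_objs uses atomic updates.
+  */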
+ if (n)
+ __update_partial_free(n, cnt);
+ else
+ __update_partial_free(get_node(s, page_to_nid(page)), cnt);
+ }
+
if (likely(!n)) {
if (likely(was_frozen)) {
@@ -3069,6 +3103,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
remove_full(s, n, page);
add_partial(n, page, DEACTIVATE_TO_TAIL);
+ __update_partial_free(n, cnt);
stat(s, FREE_ADD_PARTIAL);
}
spin_unlock_irqrestore(&n->list_lock, flags);
@@ -3080,6 +3115,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* Slab on the partial list.
*/
remove_partial(n, page);
+ __update_partial_free(n, -page->objects);
stat(s, FREE_REMOVE_PARTIAL);
} else {
/* Slab must be on the full list */
@@ -3520,6 +3556,10 @@ static inline int calculate_order(unsigned int size)
n->nr_partial = 0;
spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
+ atomic_long_set(&n->partial_free_objs, 0);
+ n->partial_total_objs = 0;
+#endif
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
atomic_long_set(&n->total_objects, 0);
@@ -3592,6 +3632,7 @@ static void early_kmem_cache_node_alloc(int node)
* initialized and there is no concurrent access.
*/
__add_partial(n, page, DEACTIVATE_TO_HEAD);
+ __update_partial_free(n, page->objects - page->inuse);
}

static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -3922,6 +3963,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
list_for_each_entry_safe(page, h, &n->partial, slab_list) {
if (!page->inuse) {
remove_partial(n, page);
+ __update_partial_free(n, -page->objects);
list_add(&page->slab_list, &discard);
} else {
list_slab_objects(s, page,
@@ -4263,6 +4305,8 @@ int __kmem_cache_shrink(struct kmem_cache *s)
if (free == page->objects) {
list_move(&page->slab_list, &discard);
n->nr_partial--;
+ __update_partial_free(n, -free);
+ __update_partial_total(n, -free);
} else if (free <= SHRINK_PROMOTE_MAX)
list_move(&page->slab_list, promote + free - 1);
}
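The hunks above only maintain the two counters; the code that reads them back is not part of this excerpt. As a rough sketch of how a CONFIG_SLUB_DEBUG/CONFIG_SYSFS consumer could use them (count_partial_objects() is an illustrative name, not something this patch adds), the per-node values can simply be summed. The result is approximate, since partial_free_objs is also updated outside list_lock:

static void count_partial_objects(struct kmem_cache *s,
				  unsigned long *free_objs,
				  unsigned long *total_objs)
{
	struct kmem_cache_node *n;
	int node;

	*free_objs = 0;
	*total_objs = 0;
	for_each_kmem_cache_node(s, node, n) {
		/* Racy but cheap: no partial list walk, no list_lock. */
		*free_objs += atomic_long_read(&n->partial_free_objs);
		*total_objs += n->partial_total_objs;
	}
}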