@@ -616,6 +616,8 @@ struct kmem_cache_node {
#ifdef CONFIG_SLUB
unsigned long nr_partial;
struct list_head partial;
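+ /* Free objects and total object capacity across the slabs on ->partial */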
+ atomic_long_t partial_free_objs;
+ atomic_long_t partial_total_objs;
#ifdef CONFIG_SLUB_DEBUG
atomic_long_t nr_slabs;
atomic_long_t total_objects;
@@ -1775,10 +1775,24 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
/*
* Management of partially allocated slabs.
*/
+
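+/*
+ * Adjust the per-node counts of free objects and of total object capacity
+ * on the partial list by a signed delta.
+ */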
+static inline void
+__update_partial_free(struct kmem_cache_node *n, long delta)
+{
+ atomic_long_add(delta, &n->partial_free_objs);
+}
+
+static inline void
+__update_partial_total(struct kmem_cache_node *n, long delta)
+{
+ atomic_long_add(delta, &n->partial_total_objs);
+}
+
static inline void
__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
{
n->nr_partial++;
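+ /* All of this slab's objects now count toward the partial-list total. */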
+ __update_partial_total(n, page->objects);
if (tail == DEACTIVATE_TO_TAIL)
list_add_tail(&page->slab_list, &n->partial);
else
@@ -1798,6 +1812,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
lockdep_assert_held(&n->list_lock);
list_del(&page->slab_list);
n->nr_partial--;
+ __update_partial_total(n, -page->objects);
}
/*
@@ -1842,6 +1857,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
return NULL;
remove_partial(n, page);
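+ /* The acquired slab's free objects (*objects) leave the partial list. */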
+ __update_partial_free(n, -*objects);
WARN_ON(!freelist);
return freelist;
}
@@ -2174,8 +2190,11 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
"unfreezing slab"))
goto redo;
- if (lock)
+ if (lock) {
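+ /*
+  * The slab now sits on the partial list and list_lock is still
+  * held, so account its free objects before unlocking.
+  */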
+ if (m == M_PARTIAL)
+ __update_partial_free(n, page->objects - page->inuse);
spin_unlock(&n->list_lock);
+ }
if (m == M_PARTIAL)
stat(s, tail);
@@ -2241,6 +2260,7 @@ static void unfreeze_partials(struct kmem_cache *s,
discard_page = page;
} else {
add_partial(n, page, DEACTIVATE_TO_TAIL);
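+ /* The slab just joined the partial list; count its free objects. */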
+ __update_partial_free(n, page->objects - page->inuse);
stat(s, FREE_ADD_PARTIAL);
}
}
@@ -2915,6 +2935,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
head, new.counters,
"__slab_free"));
+ if (!was_frozen && prior) {
+ if (n)
+ __update_partial_free(n, cnt);
+ else
+ __update_partial_free(get_node(s, page_to_nid(page)), cnt);
+ }
+
if (likely(!n)) {
/*
@@ -2944,6 +2971,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
remove_full(s, n, page);
add_partial(n, page, DEACTIVATE_TO_TAIL);
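+ /* Freshly added to the partial list; account its free objects. */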
+ __update_partial_free(n, page->objects - page->inuse);
stat(s, FREE_ADD_PARTIAL);
}
spin_unlock_irqrestore(&n->list_lock, flags);
@@ -2955,6 +2983,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* Slab on the partial list.
*/
remove_partial(n, page);
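+ /*
+  * inuse is now 0, so this drops all of the departing slab's
+  * objects from the free count.
+  */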
+ __update_partial_free(n, page->inuse - page->objects);
stat(s, FREE_REMOVE_PARTIAL);
} else {
/* Slab must be on the full list */
@@ -3364,6 +3393,8 @@ static inline int calculate_order(unsigned int size)
n->nr_partial = 0;
spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
+ atomic_long_set(&n->partial_free_objs, 0);
+ atomic_long_set(&n->partial_total_objs, 0);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
atomic_long_set(&n->total_objects, 0);
@@ -3437,6 +3468,7 @@ static void early_kmem_cache_node_alloc(int node)
* initialized and there is no concurrent access.
*/
__add_partial(n, page, DEACTIVATE_TO_HEAD);
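+ /* Account the free objects of the early-boot kmem_cache_node slab. */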
+ __update_partial_free(n, page->objects - page->inuse);
}
static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -3747,6 +3779,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
list_for_each_entry_safe(page, h, &n->partial, slab_list) {
if (!page->inuse) {
remove_partial(n, page);
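+ /* The empty slab leaves the partial list; drop its objects from the free count. */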
+ __update_partial_free(n, page->inuse - page->objects);
list_add(&page->slab_list, &discard);
} else {
list_slab_objects(s, page,
@@ -4045,6 +4078,8 @@ int __kmem_cache_shrink(struct kmem_cache *s)
if (free == page->objects) {
list_move(&page->slab_list, &discard);
n->nr_partial--;
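+ /* The fully-free slab leaves the partial list; drop both counters. */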
+ __update_partial_free(n, -free);
+ __update_partial_total(n, -free);
} else if (free <= SHRINK_PROMOTE_MAX)
list_move(&page->slab_list, promote + free - 1);
}