@@ -25,10 +25,12 @@ struct slab {
union {
struct list_head slab_list;
struct rcu_head rcu_head;
+#ifdef CONFIG_SLUB_CPU_PARTIAL
struct {
struct slab *next;
int slabs; /* Nr of slabs left */
};
+#endif
};
struct kmem_cache *slab_cache;
/* Double-word boundary */
@@ -5256,6 +5256,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
total += x;
nodes[node] += x;
+#ifdef CONFIG_SLUB_CPU_PARTIAL
slab = slub_percpu_partial_read_once(c);
if (slab) {
node = slab_nid(slab);
@@ -5268,6 +5269,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
total += x;
nodes[node] += x;
}
+#endif
}
}
@@ -5467,9 +5469,10 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
{
int objects = 0;
int slabs = 0;
- int cpu;
+ int cpu __maybe_unused;
int len = 0;
+#ifdef CONFIG_SLUB_CPU_PARTIAL
for_each_online_cpu(cpu) {
struct slab *slab;
@@ -5478,12 +5481,13 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
if (slab)
slabs += slab->slabs;
}
+#endif
/* Approximate half-full slabs, see slub_set_cpu_partial() */
objects = (slabs * oo_objects(s->oo)) / 2;
len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SLUB_CPU_PARTIAL) && defined(CONFIG_SMP)
for_each_online_cpu(cpu) {
struct slab *slab;
The fields 'next' and 'slabs' are only used when CONFIG_SLUB_CPU_PARTIAL is enabled. We can put their definitions under #ifdef to prevent accidental use when the option is disabled.

Currently show_slab_objects() and slabs_cpu_partial_show() contain code accessing the slabs field that is effectively dead with CONFIG_SLUB_CPU_PARTIAL=n, because the wrappers slub_percpu_partial() and slub_percpu_partial_read_once() expand to NULL in that configuration. To prevent a compile error once the fields are gone, we need to hide all of this code behind #ifdef as well.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slab.h | 2 ++
 mm/slub.c | 8 ++++++--
 2 files changed, 8 insertions(+), 2 deletions(-)
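
For context, a rough sketch of the wrappers involved (simplified, not the exact kernel source; the real definitions live alongside struct kmem_cache_cpu):

/*
 * Sketch only: with CONFIG_SLUB_CPU_PARTIAL=n the accessors collapse to
 * NULL, so the "if (slab)" branches in show_slab_objects() and
 * slabs_cpu_partial_show() are dead at runtime. The compiler still has
 * to parse slab->slabs in those branches, though, which is why hiding
 * the 'next'/'slabs' fields behind #ifdef also requires hiding the
 * calling code.
 */
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)			((c)->partial)
#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL
#define slub_percpu_partial_read_once(c)	NULL
#endif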