@@ -64,19 +64,19 @@
*
* The slab_lock is only used for debugging and on arches that do not
* have the ability to do a cmpxchg_double. It only protects:
- * A. slab->freelist -> List of object free in a page
+ * A. slab->freelist -> List of free objects in a slab
* B. slab->inuse -> Number of objects in use
- * C. slab->objects -> Number of objects in page
+ * C. slab->objects -> Number of objects in slab
* D. slab->frozen -> frozen state
*
* Frozen slabs
*
* If a slab is frozen then it is exempt from list management. It is not
* on any list except per cpu partial list. The processor that froze the
- * slab is the one who can perform list operations on the page. Other
+ * slab is the one who can perform list operations on the slab. Other
* processors may put objects onto the freelist but the processor that
* froze the slab is the only one that can retrieve the objects from the
- * page's freelist.
+ * slab's freelist.
*
* list_lock
*
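For illustration only (not part of this patch), the A-D list above refers to the following fields; the sketch below shows them with SLUB's bitfield widths, while the real struct slab wraps them in unions and carries additional state:

/* Illustrative sketch only, abridged from the real struct slab. */
struct slab_fields_sketch {
        void *freelist;                 /* A: first free object in the slab */
        unsigned int inuse:16;          /* B: number of objects in use */
        unsigned int objects:15;        /* C: total objects in the slab */
        unsigned int frozen:1;          /* D: slab is private to one CPU */
};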
@@ -135,7 +135,7 @@
* minimal so we rely on the page allocators per cpu caches for
* fast frees and allocs.
*
- * page->frozen The slab is frozen and exempt from list processing.
+ * slab->frozen The slab is frozen and exempt from list processing.
* This means that the slab is dedicated to a purpose
* such as satisfying allocations for a specific
* processor. Objects may be freed in the slab while
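To make the slab->frozen description concrete, here is a deliberately oversimplified sketch (illustrative only, not from this patch; the real fast path uses per-cpu freelists and cmpxchg-based updates): the CPU that froze a slab may pop objects off slab->freelist without taking the node's list_lock, while other CPUs are limited to pushing freed objects onto it.

/* Illustrative sketch only: what "exempt from list processing" buys the
 * owning CPU.  get_freepointer() is the existing slub.c helper that reads
 * the free pointer stored inside an object.
 */
static void *pop_from_frozen_slab(struct kmem_cache *s, struct slab *slab)
{
        void *object = slab->freelist;

        if (object)
                slab->freelist = get_freepointer(s, object);
        return object;
}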
@@ -250,7 +250,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
#define OO_SHIFT 16
#define OO_MASK ((1 << OO_SHIFT) - 1)
-#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
+#define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */
/* Internal SLUB flags */
/* Poison object */
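For context on the defines above (recap, not part of this patch): a kmem_cache_order_objects value packs the page order and the object count into one word, with the order in the bits above OO_SHIFT and the count in the low OO_MASK bits. MAX_OBJS_PER_PAGE stays at 32767 because slab.objects is a 15-bit field even though 16 bits are reserved for the count. The existing accessors in mm/slub.c look roughly like this (quoted from memory, unchanged by this patch):

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
        return x.x >> OO_SHIFT;         /* page order in the high bits */
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
        return x.x & OO_MASK;           /* object count in the low 16 bits */
}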
@@ -1753,14 +1753,21 @@ static inline struct slab *alloc_slab(struct kmem_cache *s, gfp_t flags,
int node, struct kmem_cache_order_objects oo)
{
struct page *page;
+ struct slab *slab;
unsigned int order = oo_order(oo);
if (node == NUMA_NO_NODE)
page = alloc_pages(flags, order);
else
page = __alloc_pages_node(node, flags, order);
+ if (!page)
+ return NULL;
- return (struct slab *)page;
+ __SetPageSlab(page);
+ slab = (struct slab *)page;
+ if (page_is_pfmemalloc(page))
+ slab_set_pfmemalloc(slab);
+ return slab;
}
#ifdef CONFIG_SLAB_FREELIST_RANDOM
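Putting the hunk above back together, the helper now reads roughly as follows (reconstructed from the diff for readability, not additional code). The explicit !page check matters because __SetPageSlab() and page_is_pfmemalloc() must not be applied to a failed allocation:

static inline struct slab *alloc_slab(struct kmem_cache *s, gfp_t flags,
                int node, struct kmem_cache_order_objects oo)
{
        struct page *page;
        struct slab *slab;
        unsigned int order = oo_order(oo);

        if (node == NUMA_NO_NODE)
                page = alloc_pages(flags, order);
        else
                page = __alloc_pages_node(node, flags, order);
        if (!page)
                return NULL;

        __SetPageSlab(page);
        slab = (struct slab *)page;
        if (page_is_pfmemalloc(page))
                slab_set_pfmemalloc(slab);
        return slab;
}

The __SetPageSlab() and pfmemalloc handling previously lived in allocate_slab(); the hunk further down removes it from there, so the page is now marked as a slab page at the point of allocation.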
@@ -1781,7 +1788,7 @@ static int init_cache_random_seq(struct kmem_cache *s)
return err;
}
- /* Transform to an offset on the set of pages */
+ /* Transform to an offset on the set of slabs */
if (s->random_seq) {
unsigned int i;
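For reference, the loop that follows this context (unchanged by this patch; quoted from memory for illustration) is what the reworded comment describes: it turns the precomputed permutation of object indices into byte offsets within the slab, so that each entry can later be added directly to the slab's base address:

                for (i = 0; i < count; i++)
                        s->random_seq[i] *= s->size;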
@@ -1911,10 +1918,6 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
account_slab(slab, oo_order(oo), s, flags);
slab->slab_cache = s;
- __SetPageSlab(slab_page(slab));
- if (page_is_pfmemalloc(slab_page(slab)))
- slab_set_pfmemalloc(slab);
-
kasan_poison_slab(slab_page(slab));
start = slab_address(slab);
@@ -3494,7 +3497,7 @@ static inline void free_nonslab_page(struct page *page, void *object)
{
unsigned int order = compound_order(page);
- VM_BUG_ON_PAGE(!PageCompound(page), page);
+ VM_BUG_ON_PAGE(!PageHead(page), page);
kfree_hook(object);
mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
__free_pages(page, order);
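The assertion change is worth a note (illustration only, not from the patch): PageCompound() is true for the head page and for every tail page of a compound allocation, while PageHead() is true only for the head page. free_nonslab_page() should only ever be handed the head page of a large kmalloc() allocation, so the new check is strictly tighter, and a stray tail page now trips the VM_BUG_ON instead of slipping through:

/* Illustrative only: what each assertion accepts. */
static inline void check_nonslab_page(struct page *page)
{
        VM_BUG_ON_PAGE(!PageCompound(page), page);      /* old: tail pages pass */
        VM_BUG_ON_PAGE(!PageHead(page), page);          /* new: head page only */
}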
Remaining bits & pieces. Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> --- mm/slub.c | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-)