Use struct slab throughout the slob allocator.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slab.h | 15 +++++++++++++++
 mm/slob.c | 30 +++++++++++++++---------------
 2 files changed, 30 insertions(+), 15 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -43,6 +43,21 @@ static inline void __slab_clear_pfmemalloc(struct slab *slab)
 	__clear_bit(PG_pfmemalloc, &slab->flags);
 }
 
+static inline bool slab_test_free(const struct slab *slab)
+{
+	return test_bit(PG_slob_free, &slab->flags);
+}
+
+static inline void __slab_set_free(struct slab *slab)
+{
+	__set_bit(PG_slob_free, &slab->flags);
+}
+
+static inline void __slab_clear_free(struct slab *slab)
+{
+	__clear_bit(PG_slob_free, &slab->flags);
+}
+
 static inline void *slab_address(const struct slab *slab)
 {
 	return page_address(slab_page(slab));
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -105,21 +105,21 @@ static LIST_HEAD(free_slob_large);
 /*
  * slob_page_free: true for pages on free_slob_pages list.
  */
-static inline int slob_page_free(struct page *sp)
+static inline int slob_page_free(struct slab *sp)
 {
-	return PageSlobFree(sp);
+	return slab_test_free(sp);
 }
 
-static void set_slob_page_free(struct page *sp, struct list_head *list)
+static void set_slob_page_free(struct slab *sp, struct list_head *list)
 {
 	list_add(&sp->slab_list, list);
-	__SetPageSlobFree(sp);
+	__slab_set_free(sp);
 }
 
-static inline void clear_slob_page_free(struct page *sp)
+static inline void clear_slob_page_free(struct slab *sp)
 {
 	list_del(&sp->slab_list);
-	__ClearPageSlobFree(sp);
+	__slab_clear_free(sp);
 }
 
 #define SLOB_UNIT sizeof(slob_t)
@@ -234,7 +234,7 @@ static void slob_free_pages(void *b, int order)
  *         freelist, in this case @page_removed_from_list will be set to
  *         true (set to false otherwise).
  */
-static void *slob_page_alloc(struct page *sp, size_t size, int align,
+static void *slob_page_alloc(struct slab *sp, size_t size, int align,
 			      int align_offset, bool *page_removed_from_list)
 {
 	slob_t *prev, *cur, *aligned = NULL;
@@ -301,7 +301,7 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align,
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 							int align_offset)
 {
-	struct page *sp;
+	struct slab *sp;
 	struct list_head *slob_list;
 	slob_t *b = NULL;
 	unsigned long flags;
@@ -323,7 +323,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 		 * If there's a node specification, search for a partial
 		 * page with a matching node id in the freelist.
 		 */
-		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
+		if (node != NUMA_NO_NODE && slab_nid(sp) != node)
 			continue;
 #endif
 		/* Enough room on this page? */
@@ -358,8 +358,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
 			return NULL;
-		sp = virt_to_page(b);
-		__SetPageSlab(sp);
+		sp = virt_to_slab(b);
+		__SetPageSlab(slab_page(sp));
 
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
@@ -381,7 +381,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
  */
 static void slob_free(void *block, int size)
 {
-	struct page *sp;
+	struct slab *sp;
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
@@ -391,7 +391,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = virt_to_page(block);
+	sp = virt_to_slab(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -401,8 +401,8 @@ static void slob_free(void *block, int size)
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
 		spin_unlock_irqrestore(&slob_lock, flags);
-		__ClearPageSlab(sp);
-		page_mapcount_reset(sp);
+		__ClearPageSlab(slab_page(sp));
+		page_mapcount_reset(slab_page(sp));
 		slob_free_pages(b, 0);
 		return;
 	}
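
Note for reviewers: the three new helpers in mm/slab.h follow the same
shape as the slab_test/set/clear_pfmemalloc() helpers above them -- a
test_bit() reader plus the non-atomic __set_bit()/__clear_bit() writers,
which is enough here because slob.c only changes the flag under
slob_lock.  A minimal userspace sketch of that pattern; the struct
layout and bit number below are stand-ins for illustration, not the
kernel's:

#include <stdbool.h>
#include <stdio.h>

#define PG_slob_free	7	/* stand-in bit number */

struct slab {
	unsigned long flags;
};

static bool slab_test_free(const struct slab *slab)
{
	return slab->flags & (1UL << PG_slob_free);
}

/* Double underscore: non-atomic writers; callers serialise (slob_lock). */
static void __slab_set_free(struct slab *slab)
{
	slab->flags |= 1UL << PG_slob_free;
}

static void __slab_clear_free(struct slab *slab)
{
	slab->flags &= ~(1UL << PG_slob_free);
}

int main(void)
{
	struct slab s = { .flags = 0 };

	__slab_set_free(&s);
	printf("free? %d\n", slab_test_free(&s));	/* prints 1 */
	__slab_clear_free(&s);
	printf("free? %d\n", slab_test_free(&s));	/* prints 0 */
	return 0;
}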
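
Note for reviewers: the slob.c side of the conversion is mechanical --
virt_to_page() becomes virt_to_slab() wherever the pointer is used as a
slab, and slab_page() converts back at the calls that still take a
struct page (__SetPageSlab(), __ClearPageSlab(), page_mapcount_reset()).
A userspace sketch of that round trip; the types and lookup are
stand-ins, but as in this series the two structs describe the same
memory, so the conversion is just a typed cast:

#include <assert.h>
#include <stdio.h>

/* Stand-in types: in the kernel, struct slab overlays struct page. */
struct page { unsigned long flags; };
struct slab { unsigned long flags; };

static struct page pages[4];	/* toy memmap */

static struct page *virt_to_page_model(int idx)
{
	return &pages[idx];	/* model of virt_to_page() */
}

static struct slab *virt_to_slab_model(int idx)
{
	return (struct slab *)virt_to_page_model(idx);
}

static struct page *slab_page_model(struct slab *slab)
{
	return (struct page *)slab;	/* model of slab_page() */
}

int main(void)
{
	struct slab *sp = virt_to_slab_model(2);

	/* Converting back lands on the same struct page. */
	assert(slab_page_model(sp) == &pages[2]);
	printf("round trip ok\n");
	return 0;
}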