@@ -41,4 +41,8 @@ struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
unsigned int align, unsigned int flags,
void (*ctor)(void *));
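+/*
+ * Bulk slab API shims: alloc_bulk() fills an array of objects and returns
+ * the number allocated (0 on failure); free_bulk() releases the whole array.
+ */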
+void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
+int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+ void **list);
+
#endif /* _TOOLS_SLAB_H */
@@ -93,14 +93,13 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
return p;
}
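+/*
+ * The body of the old kmem_cache_free(), split out so callers that already
+ * hold cachep->lock (the bulk variants) can reuse it.
+ */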
-void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
assert(objp);
uatomic_dec(&nr_allocated);
uatomic_dec(&cachep->nr_allocated);
if (kmalloc_verbose)
printf("Freeing %p to slab\n", objp);
- pthread_mutex_lock(&cachep->lock);
if (cachep->nr_objs > 10 || cachep->align) {
memset(objp, POISON_FREE, cachep->size);
free(objp);
@@ -110,9 +109,80 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
node->parent = cachep->objs;
cachep->objs = node;
}
+}
+
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+{
+ pthread_mutex_lock(&cachep->lock);
+ kmem_cache_free_locked(cachep, objp);
pthread_mutex_unlock(&cachep->lock);
}
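+
+/* Free a batch of objects, taking the cache lock only once for the batch. */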
+void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
+{
+ if (kmalloc_verbose)
+ pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);
+
+ pthread_mutex_lock(&cachep->lock);
+	for (size_t i = 0; i < size; i++)
+ kmem_cache_free_locked(cachep, list[i]);
+ pthread_mutex_unlock(&cachep->lock);
+}
+
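+/*
+ * Allocate a batch of objects, drawing from the recycled freelist when it
+ * can satisfy the whole request and falling back to fresh allocations
+ * otherwise. Returns 0 or size: the batch is all-or-nothing.
+ */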
+int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+ void **p)
+{
+ size_t i;
+
+ if (kmalloc_verbose)
+ pr_debug("Bulk alloc %lu\n", size);
+
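+	/*
+	 * Without __GFP_DIRECT_RECLAIM the allocation may only draw from the
+	 * test's preallocated budget; take it up front or fail the batch.
+	 */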
+ if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+ if (cachep->non_kernel < size)
+ return 0;
+
+ cachep->non_kernel -= size;
+ }
+
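+	/* Fast path: the freelist alone can satisfy the whole batch. */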
+ pthread_mutex_lock(&cachep->lock);
+ if (cachep->nr_objs >= size) {
+ struct radix_tree_node *node;
+
+ for (i = 0; i < size; i++) {
+ node = cachep->objs;
+ cachep->nr_objs--;
+ cachep->objs = node->parent;
+ p[i] = node;
+ node->parent = NULL;
+ }
+ pthread_mutex_unlock(&cachep->lock);
+ } else {
+ pthread_mutex_unlock(&cachep->lock);
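+		/*
+		 * Not enough recycled objects for the whole batch: allocate
+		 * every object fresh (posix_memalign()/malloc() failures are
+		 * not checked).
+		 */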
+ for (i = 0; i < size; i++) {
+ if (cachep->align) {
+				posix_memalign(&p[i], cachep->align,
+					       cachep->size);
+			} else {
+				p[i] = malloc(cachep->size);
+ }
+ if (cachep->ctor)
+ cachep->ctor(p[i]);
+ else if (gfp & __GFP_ZERO)
+ memset(p[i], 0, cachep->size);
+ }
+ }
+
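+	/* Update global and per-cache counters for every object handed out. */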
+ for (i = 0; i < size; i++) {
+ uatomic_inc(&nr_allocated);
+ uatomic_inc(&cachep->nr_allocated);
+ uatomic_inc(&cachep->nr_tallocated);
+ if (kmalloc_verbose)
+ printf("Allocating %p from slab\n", p[i]);
+ }
+
+ return size;
+}
+
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
unsigned int flags, void (*ctor)(void *))
@@ -130,3 +200,47 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
ret->non_kernel = 0;
return ret;
}
+
+/*
+ * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
+ */
+void test_kmem_cache_bulk(void)
+{
+ int i;
+ void *list[12];
+ static struct kmem_cache *test_cache, *test_cache2;
+
+ /*
+ * Testing the bulk allocators without aligned kmem_cache to force the
+ * bulk alloc/free to reuse
+ */
+ test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);
+
+ for (i = 0; i < 5; i++)
+ list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
+
+ for (i = 0; i < 5; i++)
+ kmem_cache_free(test_cache, list[i]);
+ assert(test_cache->nr_objs == 5);
+
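+	/* Bulk alloc drains the 5 recycled objects; bulk free returns them. */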
+ kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
+ kmem_cache_free_bulk(test_cache, 5, list);
+
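+	/*
+	 * Cycle 12 objects through the cache: the freelist holds at most 11,
+	 * so the final free goes straight back to the system allocator.
+	 */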
+	for (i = 0; i < 12; i++)
+ list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
+
+ for (i = 0; i < 12; i++)
+ kmem_cache_free(test_cache, list[i]);
+
+ /* The last free will not be kept around */
+ assert(test_cache->nr_objs == 11);
+
+ /* Aligned caches will immediately free */
+ test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);
+
+ kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
+ kmem_cache_free_bulk(test_cache2, 10, list);
+ assert(!test_cache2->nr_objs);
+}