Message ID | alpine.DEB.2.20.1801161215500.2945@nuc-kabylake (mailing list archive) |
---|---|
State | New, archived |
On Tue, Jan 16, 2018 at 12:17:01PM -0600, Christopher Lameter wrote:
> Draft patch of how the data structs could change. kmem_cache_attr is read
> only.

Looks good. Although I would add Kees' user feature:

struct kmem_cache_attr {
	char name[16];
	unsigned int size;
	unsigned int align;
+	unsigned int useroffset;
+	unsigned int usersize;
	slab_flags_t flags;
	kmem_cache_ctor ctor;
}

And I'd start with

+struct kmem_cache *kmem_cache_create_attr(const kmem_cache_attr *);

leaving the old kmem_cache_create to kmalloc a kmem_cache_attr and
initialise it.

Can we also do something like this?

-#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
-		sizeof(struct __struct), __alignof__(struct __struct),\
-		(__flags), NULL)
+#define KMEM_CACHE(__struct, __flags) ({ \
+	const struct kmem_cache_attr kca ## __stringify(__struct) = { \
+		.name = #__struct, \
+		.size = sizeof(struct __struct), \
+		.align = __alignof__(struct __struct), \
+		.flags = (__flags), \
+	}; \
+	kmem_cache_create_attr(&kca ## __stringify(__struct)); \
+})

That way we won't need to convert any of those users.
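To make the compatibility path Matthew describes concrete, here is a minimal sketch of how the legacy kmem_cache_create() could allocate a kmem_cache_attr and forward to the proposed kmem_cache_create_attr(). This helper is not part of the draft patch; the kzalloc()/strscpy() choices and the error handling are assumptions, and the truncation follows from the draft's fixed name[16] field.

```c
#include <linux/slab.h>
#include <linux/string.h>

/* Sketch only: the legacy entry point builds a kmem_cache_attr at
 * runtime and forwards to the new attribute-based interface. */
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
				     size_t align, slab_flags_t flags,
				     void (*ctor)(void *))
{
	struct kmem_cache_attr *attr;
	struct kmem_cache *s;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return NULL;

	/* The draft uses a fixed char name[16], so longer names would
	 * be silently truncated here. */
	strscpy(attr->name, name, sizeof(attr->name));
	attr->size = size;
	attr->align = align;
	attr->flags = flags;
	attr->ctor = ctor;

	s = kmem_cache_create_attr(attr);
	if (!s)
		kfree(attr);
	return s;
}
```

Note that such a wrapper leaves the attr writable (it lives on the heap), so only callers converted to const definitions get the read-only benefit discussed in the reply below.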
On Tue, 16 Jan 2018, Matthew Wilcox wrote:

> On Tue, Jan 16, 2018 at 12:17:01PM -0600, Christopher Lameter wrote:
> > Draft patch of how the data structs could change. kmem_cache_attr is read
> > only.
>
> Looks good. Although I would add Kees' user feature:

Sure. I tried to do this quickly so that the basic struct changes are
visible.

> And I'd start with
>
> +struct kmem_cache *kmem_cache_create_attr(const kmem_cache_attr *);
>
> leaving the old kmem_cache_create to kmalloc a kmem_cache_attr and
> initialise it.

Well, at some point we should convert the callers by putting the
definitions into const kmem_cache_attr initializations. That way the
callback function pointers are safe.

> Can we also do something like this?
>
> -#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
> -		sizeof(struct __struct), __alignof__(struct __struct),\
> -		(__flags), NULL)
> +#define KMEM_CACHE(__struct, __flags) ({ \
> +	const struct kmem_cache_attr kca ## __stringify(__struct) = { \
> +		.name = #__struct, \
> +		.size = sizeof(struct __struct), \
> +		.align = __alignof__(struct __struct), \
> +		.flags = (__flags), \
> +	}; \
> +	kmem_cache_create_attr(&kca ## __stringify(__struct)); \
> +})
>
> That way we won't need to convert any of those users.

Yep, that's what I was planning.
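For illustration, a hypothetical caller conversion of the kind Christopher describes: because the attribute block is a file-scope const definition, it is placed in read-only memory, which is what makes the embedded ctor function pointer tamper-resistant. Every identifier below (example_object, example_ctor, and so on) is invented for this sketch.

```c
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/errno.h>

/* Hypothetical object type; names are illustrative only. */
struct example_object {
	int id;
	struct list_head link;
};

static void example_ctor(void *obj)
{
	memset(obj, 0, sizeof(struct example_object));
}

/* const: the whole attr, including the ctor pointer, lands in rodata. */
static const struct kmem_cache_attr example_cache_attr = {
	.name	= "example_object",
	.size	= sizeof(struct example_object),
	.align	= __alignof__(struct example_object),
	.flags	= SLAB_HWCACHE_ALIGN,
	.ctor	= example_ctor,
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	example_cachep = kmem_cache_create_attr(&example_cache_attr);
	return example_cachep ? 0 : -ENOMEM;
}
```

Matthew's KMEM_CACHE() rewrite above generates a similar const attr at each call site, so converted macro users would pick up the same pattern for free.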
Index: linux/include/linux/slab.h
===================================================================
--- linux.orig/include/linux/slab.h
+++ linux/include/linux/slab.h
@@ -135,9 +135,17 @@ struct mem_cgroup;
 void __init kmem_cache_init(void);
 bool slab_is_available(void);
 
-struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
-			slab_flags_t,
-			void (*)(void *));
+typedef void (*kmem_cache_ctor)(void *);
+
+struct kmem_cache_attr {
+	char name[16];
+	unsigned int size;
+	unsigned int align;
+	slab_flags_t flags;
+	kmem_cache_ctor ctor;
+};
+
+struct kmem_cache *kmem_cache_create(const struct kmem_cache_attr *);
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
Index: linux/include/linux/slab_def.h
===================================================================
--- linux.orig/include/linux/slab_def.h
+++ linux/include/linux/slab_def.h
@@ -10,6 +10,7 @@
 
 struct kmem_cache {
 	struct array_cache __percpu *cpu_cache;
+	struct kmem_cache_attr *attr;
 
 /* 1) Cache tunables. Protected by slab_mutex */
 	unsigned int batchcount;
@@ -35,14 +36,9 @@ struct kmem_cache {
 	struct kmem_cache *freelist_cache;
 	unsigned int freelist_size;
 
-	/* constructor func */
-	void (*ctor)(void *obj);
-
 /* 4) cache creation/removal */
-	const char *name;
 	struct list_head list;
 	int refcount;
-	int object_size;
 	int align;
 
 /* 5) statistics */
Index: linux/include/linux/slub_def.h
===================================================================
--- linux.orig/include/linux/slub_def.h
+++ linux/include/linux/slub_def.h
@@ -83,9 +83,9 @@ struct kmem_cache {
 	struct kmem_cache_cpu __percpu *cpu_slab;
 	/* Used for retriving partial slabs etc */
 	slab_flags_t flags;
+	struct kmem_cache_attr *attr;
 	unsigned long min_partial;
 	int size;	/* The size of an object including meta data */
-	int object_size;	/* The size of an object without meta data */
 	int offset;	/* Free pointer offset. */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	int cpu_partial;	/* Number of per cpu partial objects to keep around */
@@ -97,12 +97,10 @@ struct kmem_cache {
 	struct kmem_cache_order_objects min;
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;	/* Refcount for slab cache destroy */
-	void (*ctor)(void *);
 	int inuse;	/* Offset to metadata */
 	int align;	/* Alignment */
 	int reserved;	/* Reserved bytes at the end of slabs */
 	int red_left_pad;	/* Left redzone padding size */
-	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
 #ifdef CONFIG_SYSFS
 	struct kobject kobj;	/* For sysfs */
Index: linux/mm/slab.h
===================================================================
--- linux.orig/mm/slab.h
+++ linux/mm/slab.h
@@ -18,13 +18,11 @@
  * SLUB is no longer needed.
  */
 struct kmem_cache {
-	unsigned int object_size;/* The original size of the object */
+	struct kmem_cache_attr *attr;
 	unsigned int size;	/* The aligned/padded/added on size */
 	unsigned int align;	/* Alignment as calculated */
 	slab_flags_t flags;	/* Active flags on the slab */
-	const char *name;	/* Slab name for sysfs */
 	int refcount;		/* Use counter */
-	void (*ctor)(void *);	/* Called on object slot creation */
 	struct list_head list;	/* List of all slab caches on the system */
 };
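To show what the field relocation in the draft means for readers of struct kmem_cache, here are a few hypothetical accessors (not part of the patch): anything that previously read s->name, s->object_size or s->ctor would now go through the attr pointer shared by all three allocators.

```c
/* Hypothetical accessors; the draft itself does not add these. */
static inline const char *cache_name(const struct kmem_cache *s)
{
	return s->attr->name;
}

/* attr->size is the caller-requested object size; the allocator's own
 * s->size remains the padded/aligned size. */
static inline unsigned int cache_object_size(const struct kmem_cache *s)
{
	return s->attr->size;
}

static inline void cache_ctor(const struct kmem_cache *s, void *obj)
{
	if (s->attr->ctor)
		s->attr->ctor(obj);
}
```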