@@ -187,7 +187,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
*/
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
-#define OO_SHIFT 16
+#define OO_SHIFT 15
#define OO_MASK ((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
@@ -343,6 +343,8 @@ static inline unsigned int oo_order(struct kmem_cache_order_objects x)
static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
+ BUILD_BUG_ON(OO_MASK > MAX_OBJS_PER_PAGE);
+
return x.x & OO_MASK;
}
Mask of slub objects per page shouldn't be larger than what
page->objects can hold. It requires more than 2^15 objects to hit the
problem, and I don't think anybody would. It'd be nice to have the
mask fixed, but not really worth cc'ing the stable.

Fixes: 50d5c41cd151 ("slub: Do not use frozen page flag but a bit in the page counters")
Signed-off-by: Yu Zhao <yuzhao@google.com>
---
 mm/slub.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
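
For readers less familiar with the packing that OO_SHIFT controls, below is a
minimal userspace sketch (not the kernel code itself; it only mirrors the
oo_make/oo_order/oo_objects helpers from mm/slub.c in simplified form, with a
runtime assert standing in for BUILD_BUG_ON). It shows how the page order and
object count share one word, and why the low mask must not be able to
represent more than the 15-bit page->objects field can hold.

	/*
	 * Simplified userspace sketch of the order/objects packing.
	 * The order lives in the high bits, the object count in the
	 * low OO_SHIFT bits; page->objects itself is a 15-bit field,
	 * so the low mask must not exceed MAX_OBJS_PER_PAGE.
	 */
	#include <assert.h>
	#include <stdio.h>

	#define OO_SHIFT		15
	#define OO_MASK			((1U << OO_SHIFT) - 1)
	#define MAX_OBJS_PER_PAGE	32767	/* page->objects is u15 */

	struct kmem_cache_order_objects {
		unsigned int x;
	};

	/* Pack a page order and an object count into one word. */
	static inline struct kmem_cache_order_objects oo_make(unsigned int order,
							       unsigned int objects)
	{
		struct kmem_cache_order_objects x = {
			.x = (order << OO_SHIFT) + objects,
		};
		return x;
	}

	static inline unsigned int oo_order(struct kmem_cache_order_objects x)
	{
		return x.x >> OO_SHIFT;
	}

	static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
	{
		/* With OO_SHIFT == 16 this mask would exceed MAX_OBJS_PER_PAGE. */
		assert(OO_MASK <= MAX_OBJS_PER_PAGE);
		return x.x & OO_MASK;
	}

	int main(void)
	{
		struct kmem_cache_order_objects oo = oo_make(3, 512);

		printf("order=%u objects=%u\n", oo_order(oo), oo_objects(oo));
		return 0;
	}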