@@ -4408,7 +4408,9 @@ module_init(slab_proc_init);
#ifdef CONFIG_HARDENED_USERCOPY
/*
- * Rejects objects that are incorrectly sized.
+ * Rejects incorrectly sized objects and objects that are to be copied
+ * to/from userspace but do not fall entirely within the containing slab
+ * cache's usercopy region.
*
* Returns NULL if check passes, otherwise const char * to name of cache
* to indicate an error.
@@ -4428,11 +4430,15 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
/* Find offset within object. */
offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
- /* Allow address range falling entirely within object size. */
- if (offset <= cachep->object_size && n <= cachep->object_size - offset)
- return NULL;
+ /* Make sure object falls entirely within cache's usercopy region. */
+ if (offset < cachep->useroffset)
+ return cachep->name;
+ if (offset - cachep->useroffset > cachep->usersize)
+ return cachep->name;
+ if (n > cachep->useroffset - offset + cachep->usersize)
+ return cachep->name;
- return cachep->name;
+ return NULL;
}
#endif /* CONFIG_HARDENED_USERCOPY */
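The three rejection tests added above reduce to a single interval check: the copy span [offset, offset + n) must fall entirely within the whitelisted region [useroffset, useroffset + usersize). Below is a minimal userspace sketch of the same arithmetic; struct cache and check_region() are illustrative stand-ins for the kernel structures, not kernel code:

#include <stdio.h>

/* Illustrative stand-in for the two new kmem_cache fields. */
struct cache {
	unsigned long useroffset;	/* region start, in object bytes */
	unsigned long usersize;		/* region length in bytes */
};

/*
 * Mirrors the three checks in __check_heap_object(): reject a copy
 * that starts before the region, starts past its end, or runs past
 * its end. Returns 0 when the span is fully inside the region.
 */
static int check_region(const struct cache *c, unsigned long offset,
			unsigned long n)
{
	if (offset < c->useroffset)
		return -1;			/* starts before region */
	if (offset - c->useroffset > c->usersize)
		return -1;			/* starts past region end */
	if (n > c->useroffset - offset + c->usersize)
		return -1;			/* runs past region end */
	return 0;
}

int main(void)
{
	/* Whitelist object bytes [16, 48). */
	struct cache c = { .useroffset = 16, .usersize = 32 };

	printf("%d\n", check_region(&c, 16, 32));	/* 0: exact fit */
	printf("%d\n", check_region(&c, 24, 8));	/* 0: interior span */
	printf("%d\n", check_region(&c, 8, 8));		/* -1: starts too early */
	printf("%d\n", check_region(&c, 40, 16));	/* -1: overruns the end */
	return 0;
}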
@@ -3833,7 +3833,9 @@ EXPORT_SYMBOL(__kmalloc_node);
#ifdef CONFIG_HARDENED_USERCOPY
/*
- * Rejects objects that are incorrectly sized.
+ * Rejects incorrectly sized objects and objects that are to be copied
+ * to/from userspace but do not fall entirely within the containing slab
+ * cache's usercopy region.
*
* Returns NULL if check passes, otherwise const char * to name of cache
* to indicate an error.
@@ -3843,11 +3845,9 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
{
struct kmem_cache *s;
unsigned long offset;
- size_t object_size;
-	/* Find object and usable object size. */
+	/* Find the object's cache. */
s = page->slab_cache;
- object_size = slab_ksize(s);
/* Reject impossible pointers. */
if (ptr < page_address(page))
@@ -3863,11 +3863,15 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
offset -= s->red_left_pad;
}
- /* Allow address range falling entirely within object size. */
- if (offset <= object_size && n <= object_size - offset)
- return NULL;
+ /* Make sure object falls entirely within cache's usercopy region. */
+ if (offset < s->useroffset)
+ return s->name;
+ if (offset - s->useroffset > s->usersize)
+ return s->name;
+ if (n > s->useroffset - offset + s->usersize)
+ return s->name;
- return s->name;
+ return NULL;
}
#endif /* CONFIG_HARDENED_USERCOPY */
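The SLUB version performs the same three comparisons, after first subtracting SLUB's left redzone padding from the raw offset so that it is relative to the object itself. One subtlety worth spelling out: the order of the tests is load-bearing, because all of the arithmetic is unsigned. If the middle test were dropped, an offset past the end of the region would make useroffset - offset + usersize wrap around to a value near ULONG_MAX, and the final length test would then pass almost any n. A small userspace sketch of the wraparound, with illustrative values:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long useroffset = 16, usersize = 32;
	unsigned long offset = 56, n = 8;	/* starts 8 bytes past region end (48) */

	/*
	 * Without the "offset - useroffset > usersize" guard this
	 * subtraction wraps: 16 - 56 + 32 == -8 == ULONG_MAX - 7.
	 */
	unsigned long remaining = useroffset - offset + usersize;

	printf("remaining = %lu (ULONG_MAX = %lu)\n", remaining, ULONG_MAX);
	printf("n > remaining? %d\n", n > remaining);	/* 0: the bad copy would slip through */
	return 0;
}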
@@ -58,6 +58,18 @@ static noinline int check_stack_object(const void *obj, unsigned long len)
return GOOD_STACK;
}
+/*
+ * If this function is reached, then CONFIG_HARDENED_USERCOPY has found an
+ * unexpected state during a copy_from_user() or copy_to_user() call.
+ * There are several checks being performed on the buffer by the
+ * __check_object_size() function. Normal stack buffer usage should never
+ * trip the checks, while kernel text addressing will always trip them.
+ * For cache objects, the checks ensure that only the whitelisted range of
+ * bytes for a given cache is accessed (via the cache's usersize and
+ * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
+ * kmem_cache_create_usercopy() function to create the cache (and
+ * carefully audit the whitelist range).
+ */
static void report_usercopy(const void *ptr, unsigned long len,
bool to_user, const char *type)
{
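As the comment above notes, whitelists are declared when a cache is created, using the kmem_cache_create_usercopy() function this series introduces. The sketch below shows what that might look like for a hypothetical cache; struct conn, its fields, and the "conn" cache name are invented for illustration:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>

/* Hypothetical object: only "uarea" is ever exchanged with userspace. */
struct conn {
	spinlock_t lock;		/* kernel-internal */
	struct list_head node;		/* kernel-internal */
	char uarea[64];			/* copied to/from userspace */
	void *private;			/* kernel-internal */
};

static struct kmem_cache *conn_cachep;

static int __init conn_cache_init(void)
{
	/*
	 * Whitelist only the 64 bytes of uarea. A copy_to_user() or
	 * copy_from_user() that touches the lock, the list pointers,
	 * or the private field will now trip __check_heap_object().
	 */
	conn_cachep = kmem_cache_create_usercopy("conn",
				sizeof(struct conn), 0,
				SLAB_HWCACHE_ALIGN,
				offsetof(struct conn, uarea),
				sizeof(((struct conn *)0)->uarea),
				NULL);
	return conn_cachep ? 0 : -ENOMEM;
}

One consequence of the arithmetic in the checks above: a cache that never declares a region is left with useroffset == usersize == 0, so any nonzero-length copy into or out of its objects is rejected, making "no whitelist" the safe default.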