@@ -163,7 +163,6 @@ static inline void check_page_span(const void *ptr, unsigned long n,
 {
 #ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
-	struct page *endpage;
 	bool is_reserved, is_cma;
 
 	/*
@@ -194,11 +193,6 @@ static inline void check_page_span(const void *ptr, unsigned long n,
 		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
 		return;
 
-	/* Allow if fully inside the same compound (__GFP_COMP) page. */
-	endpage = virt_to_head_page(end);
-	if (likely(endpage == page))
-		return;
-
 	/*
 	 * Reject if range is entirely either Reserved (i.e. special or
 	 * device memory), or CMA. Otherwise, reject since the object spans
@@ -258,6 +252,11 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 	if (PageSlab(page)) {
 		/* Check slab allocator for flags and size. */
 		__check_heap_object(ptr, n, page, to_user);
+	} else if (PageHead(page)) {
+		/* A compound allocation */
+		unsigned long offset = ptr - page_address(page);
+		if (offset + n > page_size(page))
+			usercopy_abort("page alloc", NULL, to_user, offset, n);
 	} else {
 		/* Verify object does not incorrectly span multiple pages. */
 		check_page_span(ptr, n, page, to_user);
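
Note (illustration only, not part of the patch): the new PageHead() branch treats the whole compound allocation as a single object and rejects any copy whose offset plus length exceeds page_size(page). The userspace sketch below walks through the same arithmetic; compound_base and compound_size are hypothetical stand-ins for page_address(page) and page_size(page).

/*
 * Userspace illustration only -- compound_base and compound_size stand in
 * for page_address(page) and page_size(page) in the kernel check.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

static bool copy_overruns_compound(const void *compound_base,
				   unsigned long compound_size,
				   const void *ptr, unsigned long n)
{
	unsigned long offset = (const char *)ptr - (const char *)compound_base;

	/* Mirrors the added check: if (offset + n > page_size(page)) ... */
	return offset + n > compound_size;
}

int main(void)
{
	unsigned long compound_size = 4 * 4096;	/* e.g. an order-2 allocation */
	char *base = malloc(compound_size);

	/* Starts 100 bytes in, stays inside the allocation: allowed. */
	printf("overrun? %d\n", copy_overruns_compound(base, compound_size,
						       base + 100, 512));

	/* Starts near the end and runs past the allocation: rejected. */
	printf("overrun? %d\n", copy_overruns_compound(base, compound_size,
						       base + compound_size - 8, 64));

	free(base);
	return 0;
}

A copy that starts near the end of the compound page trips the same offset + n > size condition that usercopy_abort() reports in the kernel path.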