@@ -938,9 +938,13 @@ static inline int head_compound_pincount(struct page *head)
static inline void set_compound_order(struct page *page, unsigned int order)
{
+ unsigned int shift = 1U << order;
page[1].compound_order = order;
#ifdef CONFIG_64BIT
- page[1].compound_nr = 1U << order;
+ // Branchless conditional:
+ //   order  > 0 --> compound_nr = shift
+ //   order == 0 --> compound_nr = 0
+ page[1].compound_nr = shift ^ ((-order ^ shift) & shift);
#endif
}
@@ -1344,9 +1344,6 @@ static void __destroy_compound_gigantic_page(struct page *page,
}
set_compound_order(page, 0);
-#ifdef CONFIG_64BIT
- page[1].compound_nr = 0;
-#endif
__ClearPageHead(page);
}
@@ -1865,9 +1862,6 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
__ClearPageReserved(p);
}
set_compound_order(page, 0);
-#ifdef CONFIG_64BIT
- page[1].compound_nr = 0;
-#endif
__ClearPageHead(page);
return false;
}
Since commit 1378a5ee451a ("mm: store compound_nr as well as compound_order"),
page[1].compound_nr must also be explicitly set to 0 when calling
set_compound_order(page, 0). This can lead to bugs if a caller of
set_compound_order(page, 0) forgets to clear compound_nr; one example is
commit ba9c1201beaa ("mm/hugetlb: clear compound_nr before freeing gigantic
pages").

Collapse these explicit clears into set_compound_order() itself by using a
branchless bitmath conditional [1].

[1] https://graphics.stanford.edu/~seander/bithacks.html#ConditionalSetOrClearBitsWithoutBranching

Signed-off-by: Nico Pache <npache@redhat.com>
Author: Nico Pache <npache@redhat.com>
---
 include/linux/mm.h | 6 +++++-
 mm/hugetlb.c       | 6 ------
 2 files changed, 5 insertions(+), 7 deletions(-)
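For reference, a small userspace sketch (not part of the patch; the helper
name branchless_nr() is made up for illustration) that checks the branchless
expression against the obvious ternary form for every order a 32-bit
compound_nr can hold. It relies on bit 'order' of -order being set for any
order > 0:

#include <assert.h>
#include <stdio.h>

/* Same expression as in set_compound_order(), lifted into a helper. */
static unsigned int branchless_nr(unsigned int order)
{
	unsigned int shift = 1U << order;

	return shift ^ ((-order ^ shift) & shift);
}

int main(void)
{
	unsigned int order;

	for (order = 0; order < 32; order++) {
		/* order > 0 --> 1 << order, order == 0 --> 0 */
		unsigned int expected = order ? 1U << order : 0;

		assert(branchless_nr(order) == expected);
	}
	printf("branchless compound_nr check passed\n");
	return 0;
}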