--- a/arch/arm64/kernel/mte_tag_storage.c
+++ b/arch/arm64/kernel/mte_tag_storage.c
@@ -572,6 +572,23 @@ int reserve_tag_storage(struct page *page, int order, gfp_t gfp)
break;
}
+ /*
+ * alloc_contig_range() returns -EINTR from
+ * __alloc_contig_migrate_range() if a fatal signal is pending.
+ * As long as the signal hasn't been handled, it is impossible
+ * to reserve tag storage for any page. Stop trying to reserve
+ * tag storage, but return 0 so the page allocator can make
+ * forward progress, instead of printing an OOM splat.
+ *
+ * The tagged page with missing tag storage will be mapped with
+ * PAGE_FAULT_ON_ACCESS in set_pte_at(), which means accesses
+ * until the signal is delivered will cause a fault.
+ */
+ if (ret == -EINTR) {
+ ret = 0;
+ goto out_error;
+ }
+
if (ret)
goto out_error;
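
As a reading aid for the hunk above, here is a minimal userspace model of the
new error handling. It is only a sketch: the *_stub names, the boolean "fatal
signal pending" flag and the simplified return values are invented for
illustration and are not the kernel code.

/*
 * Minimal userspace model of the -EINTR handling added above.
 * All names and types are stand-ins, not the kernel implementation.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool fatal_signal_pending_stub;

/* Stands in for alloc_contig_range(): fails with -EINTR on a pending fatal signal. */
static int alloc_contig_range_stub(void)
{
	if (fatal_signal_pending_stub)
		return -EINTR;
	return 0;
}

/* Stands in for reserve_tag_storage(): -EINTR is not treated as an allocation failure. */
static int reserve_tag_storage_stub(bool *reserved)
{
	int ret = alloc_contig_range_stub();

	if (ret == -EINTR) {
		/*
		 * The process is being killed; report success so the caller
		 * makes progress instead of printing an OOM splat. The page
		 * simply ends up without tag storage reserved.
		 */
		*reserved = false;
		return 0;
	}
	*reserved = (ret == 0);
	return ret;
}

int main(void)
{
	bool reserved;
	int ret;

	fatal_signal_pending_stub = true;
	ret = reserve_tag_storage_stub(&reserved);
	printf("ret=%d reserved=%d\n", ret, reserved);
	return 0;
}

The point of the model is that -EINTR is folded into the success path, so the
page allocator never sees a pending fatal signal as an allocation failure.
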
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -950,6 +950,11 @@ gfp_t arch_calc_vma_gfp(struct vm_area_struct *vma, gfp_t gfp)
void tag_clear_highpage(struct page *page)
{
+ if (tag_storage_enabled() && unlikely(!page_tag_storage_reserved(page))) {
+ clear_page(page_address(page));
+ return;
+ }
+
/* Newly allocated page, shouldn't have been tagged yet */
WARN_ON_ONCE(!try_page_mte_tagging(page));
mte_zero_clear_page_tags(page_address(page));
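
The same kind of reading aid for the tag_clear_highpage() hunk above: when tag
storage is missing because the task is being killed, only the page's data is
cleared, and the tag zeroing plus the try_page_mte_tagging() ownership step
are skipped. The struct and the *_stub helpers below are stand-ins for
illustration, not the arm64 implementation.

/* Userspace model of the tag_clear_highpage() branch above; all types are stand-ins. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

struct fake_page {
	unsigned char data[PAGE_SIZE];
	bool tag_storage_reserved;
};

static bool tag_storage_enabled_stub = true;

static void mte_zero_clear_page_tags_stub(struct fake_page *page)
{
	/* Models the normal path, which zeroes both data and tags. */
	memset(page->data, 0, PAGE_SIZE);
	printf("cleared data and tags\n");
}

static void tag_clear_highpage_model(struct fake_page *page)
{
	if (tag_storage_enabled_stub && !page->tag_storage_reserved) {
		/* No tag storage reserved: clear only the data, leave tags alone. */
		memset(page->data, 0, PAGE_SIZE);
		printf("cleared data only\n");
		return;
	}
	mte_zero_clear_page_tags_stub(page);
}

int main(void)
{
	struct fake_page page = { .tag_storage_reserved = false };

	tag_clear_highpage_model(&page);
	return 0;
}
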
As long as a fatal signal is pending, alloc_contig_range() will fail with
-EINTR. This makes it impossible for tag storage allocation to succeed, and
the page allocator will print an OOM splat. The process is going to be
killed anyway, so return 0 (success) from reserve_tag_storage() to allow the
page allocator to make progress. set_pte_at() will map the page with
PAGE_FAULT_ON_ACCESS, and subsequent accesses from different threads will
fault until the signal is delivered.

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
 arch/arm64/kernel/mte_tag_storage.c | 17 +++++++++++++++++
 arch/arm64/mm/fault.c               |  5 +++++
 2 files changed, 22 insertions(+)
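
To make the last sentence of the message concrete, the sketch below models the
described behaviour: a tagged page whose tag storage could not be reserved is
mapped so that it faults on access. The enum, the page struct and
set_pte_model() are placeholders built from the description above, not the
series' actual set_pte_at() code or the real PAGE_FAULT_ON_ACCESS encoding.

/*
 * Rough model of the behaviour described in the commit message: a tagged
 * page without reserved tag storage gets a mapping that faults on access,
 * so any access from another thread traps until the fatal signal is
 * delivered. Everything here is a stand-in for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

enum pte_kind {
	PTE_NORMAL_TAGGED,
	PTE_FAULT_ON_ACCESS,	/* placeholder for the series' PAGE_FAULT_ON_ACCESS */
};

struct fake_page {
	bool tagged;
	bool tag_storage_reserved;
};

static enum pte_kind set_pte_model(const struct fake_page *page)
{
	if (page->tagged && !page->tag_storage_reserved)
		return PTE_FAULT_ON_ACCESS;
	return PTE_NORMAL_TAGGED;
}

int main(void)
{
	struct fake_page killed_task_page = { .tagged = true, .tag_storage_reserved = false };

	if (set_pte_model(&killed_task_page) == PTE_FAULT_ON_ACCESS)
		printf("accesses will fault until the signal is delivered\n");
	return 0;
}
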