
[RFC,v3,27/35] arm64: mte: Handle tag storage pages mapped in an MTE VMA

Message ID 20240125164256.4147-28-alexandru.elisei@arm.com (mailing list archive)
State: New, archived
Series Add support for arm64 MTE dynamic tag storage reuse

Commit Message

Alexandru Elisei Jan. 25, 2024, 4:42 p.m. UTC
Tag storage pages cannot be tagged. When such a page is mapped in an
MTE-enabled VMA, migrate it out directly and don't try to reserve tag
storage for it.

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
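Note: the decision flow in handle_folio_missing_tag_storage() after this
patch can be summarised with the simplified sketch below. This is
illustrative only, not the literal kernel code: it uses a hypothetical
function name, drops the map_pte parameter, and omits the locking, the
migrate-isolate check and the out_map/out_retry labels of the real
function (returning VM_FAULT_RETRY here stands in for the out_retry
path).

	static vm_fault_t missing_tag_storage_sketch(struct folio *folio,
						     struct vm_fault *vmf)
	{
		int ret = 0;

		if (!page_is_tag_storage(folio_page(folio, 0))) {
			/* Regular page: try to reserve tag storage for it. */
			ret = reserve_tag_storage(folio_page(folio, 0),
						  folio_order(folio),
						  GFP_HIGHUSER_MOVABLE);
			if (!ret)
				return 0;	/* reserved, map the page */

			/* Reservation failed; retrying the fault is cheaper. */
			if (fault_flag_allow_retry_first(vmf->flags))
				return VM_FAULT_RETRY;
		}

		/*
		 * Either the page is itself tag storage (it can never be
		 * tagged) or reserving tag storage failed: migrate the data
		 * to a page that does have tag storage reserved.
		 */
		replace_folio_with_tagged(folio);
		return 0;
	}
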
 arch/arm64/include/asm/mte_tag_storage.h |  1 +
 arch/arm64/kernel/mte_tag_storage.c      | 15 +++++++++++++++
 arch/arm64/mm/fault.c                    | 11 +++++++++--
 3 files changed, 25 insertions(+), 2 deletions(-)

Patch

diff --git a/arch/arm64/include/asm/mte_tag_storage.h b/arch/arm64/include/asm/mte_tag_storage.h
index 6d0f6ffcfdd6..50bdae94cf71 100644
--- a/arch/arm64/include/asm/mte_tag_storage.h
+++ b/arch/arm64/include/asm/mte_tag_storage.h
@@ -32,6 +32,7 @@  int reserve_tag_storage(struct page *page, int order, gfp_t gfp);
 void free_tag_storage(struct page *page, int order);
 
 bool page_tag_storage_reserved(struct page *page);
+bool page_is_tag_storage(struct page *page);
 
 vm_fault_t handle_folio_missing_tag_storage(struct folio *folio, struct vm_fault *vmf,
 					    bool *map_pte);
diff --git a/arch/arm64/kernel/mte_tag_storage.c b/arch/arm64/kernel/mte_tag_storage.c
index 1c8469781870..afe2bb754879 100644
--- a/arch/arm64/kernel/mte_tag_storage.c
+++ b/arch/arm64/kernel/mte_tag_storage.c
@@ -492,6 +492,21 @@  bool page_tag_storage_reserved(struct page *page)
 	return test_bit(PG_tag_storage_reserved, &page->flags);
 }
 
+bool page_is_tag_storage(struct page *page)
+{
+	unsigned long pfn = page_to_pfn(page);
+	struct range *tag_range;
+	int i;
+
+	for (i = 0; i < num_tag_regions; i++) {
+		tag_range = &tag_regions[i].tag_range;
+		if (tag_range->start <= pfn && pfn <= tag_range->end)
+			return true;
+	}
+
+	return false;
+}
+
 int reserve_tag_storage(struct page *page, int order, gfp_t gfp)
 {
 	unsigned long start_block, end_block;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 1db3adb6499f..01450ab91a87 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -1014,6 +1014,7 @@  static int replace_folio_with_tagged(struct folio *folio)
 vm_fault_t handle_folio_missing_tag_storage(struct folio *folio, struct vm_fault *vmf,
 					    bool *map_pte)
 {
+	bool is_tag_storage = page_is_tag_storage(folio_page(folio, 0));
 	struct vm_area_struct *vma = vmf->vma;
 	int ret = 0;
 
@@ -1033,12 +1034,18 @@  vm_fault_t handle_folio_missing_tag_storage(struct folio *folio, struct vm_fault
 	if (unlikely(is_migrate_isolate_page(folio_page(folio, 0))))
 		goto out_retry;
 
-	ret = reserve_tag_storage(folio_page(folio, 0), folio_order(folio), GFP_HIGHUSER_MOVABLE);
-	if (ret) {
+	if (!is_tag_storage) {
+		ret = reserve_tag_storage(folio_page(folio, 0), folio_order(folio),
+					  GFP_HIGHUSER_MOVABLE);
+		if (!ret)
+			goto out_map;
+
 		/* replace_folio_with_tagged() is expensive, try to avoid it. */
 		if (fault_flag_allow_retry_first(vmf->flags))
 			goto out_retry;
+	}
 
+	if (ret || is_tag_storage) {
 		replace_folio_with_tagged(folio);
 		return 0;
 	}