diff --git a/arch/arm64/include/asm/mte_tag_storage.h b/arch/arm64/include/asm/mte_tag_storage.h
--- a/arch/arm64/include/asm/mte_tag_storage.h
+++ b/arch/arm64/include/asm/mte_tag_storage.h
@@ -37,6 +37,15 @@ bool page_is_tag_storage(struct page *page);
vm_fault_t handle_page_missing_tag_storage(struct vm_fault *vmf);
vm_fault_t handle_huge_page_missing_tag_storage(struct vm_fault *vmf);
+
+void tags_by_pfn_lock(void);
+void tags_by_pfn_unlock(void);
+
+void *mte_erase_tags_for_pfn(unsigned long pfn);
+bool mte_save_tags_for_pfn(void *tags, unsigned long pfn);
+void mte_restore_tags_for_pfn(unsigned long start_pfn, int order);
+
+vm_fault_t mte_try_transfer_swap_tags(swp_entry_t entry, struct page *page);
#else
static inline bool tag_storage_enabled(void)
{
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1084,6 +1084,17 @@ static inline void arch_swap_invalidate_area(int type)
mte_invalidate_tags_area_by_swp_entry(type);
}
+#ifdef CONFIG_ARM64_MTE_TAG_STORAGE
+#define __HAVE_ARCH_SWAP_PREPARE_TO_RESTORE
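+/*
+ * Called on the swap-in path before the tags are restored; can fail with
+ * VM_FAULT_OOM if a buffer for the saved tags cannot be allocated.
+ */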
+static inline vm_fault_t arch_swap_prepare_to_restore(swp_entry_t entry,
+ struct folio *folio)
+{
+ if (tag_storage_enabled())
+ return mte_try_transfer_swap_tags(entry, &folio->page);
+ return 0;
+}
+#endif
+
#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
diff --git a/arch/arm64/kernel/mte_tag_storage.c b/arch/arm64/kernel/mte_tag_storage.c
--- a/arch/arm64/kernel/mte_tag_storage.c
+++ b/arch/arm64/kernel/mte_tag_storage.c
@@ -547,8 +547,10 @@ int reserve_tag_storage(struct page *page, int order, gfp_t gfp)
mutex_lock(&tag_blocks_lock);
/* Check again, this time with the lock held. */
- if (page_tag_storage_reserved(page))
- goto out_unlock;
+ if (page_tag_storage_reserved(page)) {
+ mutex_unlock(&tag_blocks_lock);
+ return 0;
+ }
/* Make sure existing entries are not freed from under our feet. */
xa_lock_irqsave(&tag_blocks_reserved, flags);
@@ -583,9 +585,10 @@ int reserve_tag_storage(struct page *page, int order, gfp_t gfp)
}
page_set_tag_storage_reserved(page, order);
-out_unlock:
mutex_unlock(&tag_blocks_lock);
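+ /* Tag storage is now reserved; copy in any tags stashed by pfn. */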
+ mte_restore_tags_for_pfn(page_to_pfn(page), order);
+
return 0;
out_error:
@@ -612,7 +615,8 @@ void free_tag_storage(struct page *page, int order)
struct tag_region *region;
unsigned long page_va;
unsigned long flags;
- int ret;
+ void *tags;
+ int i, ret;
ret = tag_storage_find_block(page, &start_block, &region);
if (WARN_ONCE(ret, "Missing tag storage block for pfn 0x%lx", page_to_pfn(page)))
@@ -622,6 +626,14 @@ void free_tag_storage(struct page *page, int order)
/* Avoid writeback of dirty tag cache lines corrupting data. */
dcache_inval_tags_poc(page_va, page_va + (PAGE_SIZE << order));
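+ /* Free any tags stashed for these pfns; they must not be restored into a future page. */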
+ tags_by_pfn_lock();
+ for (i = 0; i < (1 << order); i++) {
+ tags = mte_erase_tags_for_pfn(page_to_pfn(page + i));
+ if (unlikely(tags))
+ mte_free_tag_buf(tags);
+ }
+ tags_by_pfn_unlock();
+
end_block = start_block + order_to_num_blocks(order) * region->block_size;
xa_lock_irqsave(&tag_blocks_reserved, flags);
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
--- a/arch/arm64/mm/mteswap.c
+++ b/arch/arm64/mm/mteswap.c
@@ -20,6 +20,114 @@ void mte_free_tag_buf(void *buf)
kfree(buf);
}
+#ifdef CONFIG_ARM64_MTE_TAG_STORAGE
+static DEFINE_XARRAY(tags_by_pfn);
+
+void tags_by_pfn_lock(void)
+{
+ xa_lock(&tags_by_pfn);
+}
+
+void tags_by_pfn_unlock(void)
+{
+ xa_unlock(&tags_by_pfn);
+}
+
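+/* Caller must hold the xarray lock, taken with tags_by_pfn_lock(). */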
+void *mte_erase_tags_for_pfn(unsigned long pfn)
+{
+ return __xa_erase(&tags_by_pfn, pfn);
+}
+
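+/*
+ * Returns true if @tags was stashed for @pfn. Returns false if the caller
+ * must free @tags instead: either tag storage is already reserved for the
+ * page, or storing the entry failed.
+ */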
+bool mte_save_tags_for_pfn(void *tags, unsigned long pfn)
+{
+ void *entry;
+ int ret;
+
+ ret = xa_reserve(&tags_by_pfn, pfn, GFP_KERNEL);
+ if (ret)
+ return false;
+
+ tags_by_pfn_lock();
+
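+ /*
+ * Tag storage was reserved in the meantime; let
+ * mte_restore_page_tags_by_swp_entry() restore the tags instead.
+ */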
+ if (page_tag_storage_reserved(pfn_to_page(pfn))) {
+ tags_by_pfn_unlock();
+ return false;
+ }
+
+ entry = __xa_store(&tags_by_pfn, pfn, tags, GFP_ATOMIC);
+ if (xa_is_err(entry)) {
+ xa_release(&tags_by_pfn, pfn);
+ tags_by_pfn_unlock();
+ return false;
+ } else if (entry) {
+ mte_free_tag_buf(entry);
+ }
+
+ tags_by_pfn_unlock();
+ return true;
+}
+
+void mte_restore_tags_for_pfn(unsigned long start_pfn, int order)
+{
+ struct page *page = pfn_to_page(start_pfn);
+ unsigned long pfn;
+ void *tags;
+
+ tags_by_pfn_lock();
+
+ for (pfn = start_pfn; pfn < start_pfn + (1 << order); pfn++, page++) {
+ if (WARN_ON_ONCE(!page_tag_storage_reserved(page)))
+ continue;
+
+ tags = mte_erase_tags_for_pfn(pfn);
+ if (unlikely(tags)) {
+ /*
+ * Mark the page as tagged so mte_sync_tags() doesn't
+ * clear the tags.
+ */
+ WARN_ON_ONCE(!try_page_mte_tagging(page));
+ mte_copy_page_tags_from_buf(page_address(page), tags);
+ set_page_mte_tagged(page);
+ mte_free_tag_buf(tags);
+ }
+ }
+
+ tags_by_pfn_unlock();
+}
+
+/*
+ * Note on locking: swap in/out is done with the folio locked, which eliminates
+ * races with mte_save/restore_page_tags_by_swp_entry.
+ */
+vm_fault_t mte_try_transfer_swap_tags(swp_entry_t entry, struct page *page)
+{
+ void *swap_tags, *pfn_tags;
+ bool saved;
+
+ /*
+ * mte_restore_page_tags_by_swp_entry() will take care of copying the
+ * tags over.
+ */
+ if (likely(page_mte_tagged(page) || page_tag_storage_reserved(page)))
+ return 0;
+
+ swap_tags = xa_load(&tags_by_swp_entry, entry.val);
+ if (!swap_tags)
+ return 0;
+
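+ /*
+ * Stash a copy of the tags by pfn; reserve_tag_storage() will copy them
+ * into the page via mte_restore_tags_for_pfn().
+ */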
+ pfn_tags = mte_allocate_tag_buf();
+ if (!pfn_tags)
+ return VM_FAULT_OOM;
+
+ memcpy(pfn_tags, swap_tags, MTE_PAGE_TAG_STORAGE_SIZE);
+ saved = mte_save_tags_for_pfn(pfn_tags, page_to_pfn(page));
+ if (!saved)
+ mte_free_tag_buf(pfn_tags);
+
+ return 0;
+}
+#endif
+
int mte_save_page_tags_by_swp_entry(struct page *page)
{
void *tags, *ret;
@@ -54,6 +162,10 @@ void mte_restore_page_tags_by_swp_entry(swp_entry_t entry, struct page *page)
if (!tags)
return;
+ /* Tags will be restored when tag storage is reserved. */
+ if (tag_storage_enabled() && unlikely(!page_tag_storage_reserved(page)))
+ return;
+
if (try_page_mte_tagging(page)) {
mte_copy_page_tags_from_buf(page_address(page), tags);
set_page_mte_tagged(page);
Linux restores tags when a page is swapped in and there are tags associated
with the swap entry which the new page will replace. The saved tags are
restored even if the page will not be mapped as tagged, to protect against
cases where the page is shared between different VMAs, and is tagged in
some, but untagged in others. By using this approach, the process can still
access the correct tags following an mprotect(PROT_MTE) on the non-MTE
enabled VMA.

But this poses a challenge for managing tag storage: in the scenario above,
when a new page is allocated to be swapped in for the process where it will
be mapped as untagged, the corresponding tag storage block is not reserved.
mte_restore_page_tags_by_swp_entry(), when it restores the saved tags, will
overwrite data in the tag storage block associated with the new page,
leading to data corruption if the block is in use by a process.

Get around this issue by saving the tags in a new xarray, this time indexed
by the page pfn, and then restoring them when tag storage is reserved for
the page.

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
 arch/arm64/include/asm/mte_tag_storage.h |   9 ++
 arch/arm64/include/asm/pgtable.h         |  11 +++
 arch/arm64/kernel/mte_tag_storage.c      |  20 +++-
 arch/arm64/mm/mteswap.c                  | 112 +++++++++++++++++++++++
 4 files changed, 148 insertions(+), 4 deletions(-)
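For reference, below is a minimal sketch of how a swap-in fault path is
expected to drive the new hook, with the folio locked as described in the
locking note in mteswap.c. The function swapin_restore_tags_sketch() is
illustrative only and not part of this patch; arch_swap_prepare_to_restore()
and arch_swap_restore() are the hooks defined in pgtable.h:

/* Illustrative sketch, not part of this patch. */
static vm_fault_t swapin_restore_tags_sketch(swp_entry_t entry,
					     struct folio *folio)
{
	vm_fault_t err;

	/*
	 * With tag storage enabled, this may stash the saved tags by pfn
	 * and can fail with VM_FAULT_OOM if allocating the tag buffer fails.
	 */
	err = arch_swap_prepare_to_restore(entry, folio);
	if (err)
		return err;

	/*
	 * Restores the tags immediately only if tag storage is already
	 * reserved; otherwise reserve_tag_storage() restores them later
	 * through mte_restore_tags_for_pfn().
	 */
	arch_swap_restore(entry, folio);

	return 0;
}

The fallible prepare step exists because arch_swap_restore() returns void
and cannot report the tag buffer allocation failure to the fault handler.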