
[RFC,v3,22/35] arm64: mte: Enable tag storage if CMA areas have been activated

Message ID 20240125164256.4147-23-alexandru.elisei@arm.com (mailing list archive)
State New
Series Add support for arm64 MTE dynamic tag storage reuse

Commit Message

Alexandru Elisei Jan. 25, 2024, 4:42 p.m. UTC
Before enabling MTE tag storage management, make sure that the CMA areas
have been successfully activated. If a CMA area fails activation, the pages
are kept as reserved. Reserved pages are never used by the page allocator.

If this happens, the kernel would have to manage tag storage for only part of
the memory, not all of it, and that would make the code unreasonably
complicated.

Choose to disable tag storage management altogether if a CMA area fails to
be activated.

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---

Changes since v2:

* New patch.

 arch/arm64/include/asm/mte_tag_storage.h | 12 ++++++
 arch/arm64/kernel/mte_tag_storage.c      | 50 ++++++++++++++++++++++++
 2 files changed, 62 insertions(+)
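
The static key added by this patch is presumably what later patches in the
series use as a cheap gate for the tag storage paths. A minimal sketch of such
a caller follows; the function name and the reservation step are illustrative
assumptions, not part of this patch:

	/* Illustrative caller, not part of this patch. */
	static int mte_reserve_tags_for_page(struct page *page)
	{
		/*
		 * The static branch stays false if any CMA area failed
		 * activation, so callers skip tag storage handling entirely.
		 */
		if (!tag_storage_enabled())
			return 0;

		/* ... look up and reserve the matching tag storage pages ... */
		return 0;
	}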

Comments

Evgenii Stepanov Feb. 2, 2024, 10:30 p.m. UTC | #1
On Thu, Jan 25, 2024 at 8:44 AM Alexandru Elisei
<alexandru.elisei@arm.com> wrote:
> [...]
> @@ -314,3 +316,51 @@ void __init mte_init_tag_storage(void)
>         num_tag_regions = 0;
>         pr_info("MTE tag storage region management disabled");
>  }
> +
> +static int __init mte_enable_tag_storage(void)
> +{
> +       struct range *tag_range;
> +       struct cma *cma;
> +       int i, ret;
> +
> +       if (num_tag_regions == 0)
> +               return 0;
> +
> +       for (i = 0; i < num_tag_regions; i++) {
> +               tag_range = &tag_regions[i].tag_range;
> +               cma = tag_regions[i].cma;
> +               /*
> +                * CMA will keep the pages as reserved when the region fails
> +                * activation.
> +                */
> +               if (PageReserved(pfn_to_page(tag_range->start)))
> +                       goto out_disabled;
> +       }
> +
> +       static_branch_enable(&tag_storage_enabled_key);
> +       pr_info("MTE tag storage region management enabled");
> +
> +       return 0;
> +
> +out_disabled:
> +       for (i = 0; i < num_tag_regions; i++) {
> +               tag_range = &tag_regions[i].tag_range;
> +               cma = tag_regions[i].cma;
> +
> +               if (PageReserved(pfn_to_page(tag_range->start)))
> +                       continue;
> +
> +               /* Try really hard to reserve the tag storage. */
> +               ret = cma_alloc(cma, range_len(tag_range), 8, true);
> +               /*
> +                * Tag storage is still in use for data, memory and/or tag
> +                * corruption will ensue.
> +                */
> +               WARN_ON_ONCE(ret);

cma_alloc() returns a struct page *, so this condition needs to be inverted
and the type of `ret` changed.
Not sure how it slipped through; this is a compile error with clang.

> +       }
> +       num_tag_regions = 0;
> +       pr_info("MTE tag storage region management disabled");
> +
> +       return -EINVAL;
> +}
> +arch_initcall(mte_enable_tag_storage);
> --
> 2.43.0
>
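
For reference, cma_alloc() is declared in include/linux/cma.h and, in recent
kernels, returns a page pointer rather than an errno:

	struct page *cma_alloc(struct cma *cma, unsigned long count,
			       unsigned int align, bool no_warn);

so the failure check in the fallback loop has to test for NULL. A sketch of
the presumed fix (not the actual follow-up change):

	struct page *page;

	/* Try really hard to reserve the tag storage. */
	page = cma_alloc(cma, range_len(tag_range), 8, true);
	/*
	 * Tag storage is still in use for data, memory and/or tag
	 * corruption will ensue.
	 */
	WARN_ON_ONCE(!page);
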
Alexandru Elisei Feb. 5, 2024, 4:30 p.m. UTC | #2
Hi Evgenii,

On Fri, Feb 02, 2024 at 02:30:00PM -0800, Evgenii Stepanov wrote:
> On Thu, Jan 25, 2024 at 8:44 AM Alexandru Elisei
> <alexandru.elisei@arm.com> wrote:
> > [...]
> > +out_disabled:
> > +       for (i = 0; i < num_tag_regions; i++) {
> > +               tag_range = &tag_regions[i].tag_range;
> > +               cma = tag_regions[i].cma;
> > +
> > +               if (PageReserved(pfn_to_page(tag_range->start)))
> > +                       continue;
> > +
> > +               /* Try really hard to reserve the tag storage. */
> > +               ret = cma_alloc(cma, range_len(tag_range), 8, true);
> > +               /*
> > +                * Tag storage is still in use for data, memory and/or tag
> > +                * corruption will ensue.
> > +                */
> > +               WARN_ON_ONCE(ret);
> 
> cma_alloc() returns a struct page *, so this condition needs to be inverted
> and the type of `ret` changed.
> Not sure how it slipped through; this is a compile error with clang.

Checked just now, it's a warning with gcc, I must have missed it. Will fix.

Thanks,
Alex


Patch

diff --git a/arch/arm64/include/asm/mte_tag_storage.h b/arch/arm64/include/asm/mte_tag_storage.h
index 3c2cd29e053e..7b3f6bff8e6f 100644
--- a/arch/arm64/include/asm/mte_tag_storage.h
+++ b/arch/arm64/include/asm/mte_tag_storage.h
@@ -6,8 +6,20 @@ 
 #define __ASM_MTE_TAG_STORAGE_H
 
 #ifdef CONFIG_ARM64_MTE_TAG_STORAGE
+
+DECLARE_STATIC_KEY_FALSE(tag_storage_enabled_key);
+
+static inline bool tag_storage_enabled(void)
+{
+	return static_branch_likely(&tag_storage_enabled_key);
+}
+
 void mte_init_tag_storage(void);
 #else
+static inline bool tag_storage_enabled(void)
+{
+	return false;
+}
 static inline void mte_init_tag_storage(void)
 {
 }
diff --git a/arch/arm64/kernel/mte_tag_storage.c b/arch/arm64/kernel/mte_tag_storage.c
index 9a1a8a45171e..d58c68b4a849 100644
--- a/arch/arm64/kernel/mte_tag_storage.c
+++ b/arch/arm64/kernel/mte_tag_storage.c
@@ -19,6 +19,8 @@ 
 
 #include <asm/mte_tag_storage.h>
 
+__ro_after_init DEFINE_STATIC_KEY_FALSE(tag_storage_enabled_key);
+
 struct tag_region {
 	struct range mem_range;	/* Memory associated with the tag storage, in PFNs. */
 	struct range tag_range;	/* Tag storage memory, in PFNs. */
@@ -314,3 +316,51 @@  void __init mte_init_tag_storage(void)
 	num_tag_regions = 0;
 	pr_info("MTE tag storage region management disabled");
 }
+
+static int __init mte_enable_tag_storage(void)
+{
+	struct range *tag_range;
+	struct cma *cma;
+	int i, ret;
+
+	if (num_tag_regions == 0)
+		return 0;
+
+	for (i = 0; i < num_tag_regions; i++) {
+		tag_range = &tag_regions[i].tag_range;
+		cma = tag_regions[i].cma;
+		/*
+		 * CMA will keep the pages as reserved when the region fails
+		 * activation.
+		 */
+		if (PageReserved(pfn_to_page(tag_range->start)))
+			goto out_disabled;
+	}
+
+	static_branch_enable(&tag_storage_enabled_key);
+	pr_info("MTE tag storage region management enabled");
+
+	return 0;
+
+out_disabled:
+	for (i = 0; i < num_tag_regions; i++) {
+		tag_range = &tag_regions[i].tag_range;
+		cma = tag_regions[i].cma;
+
+		if (PageReserved(pfn_to_page(tag_range->start)))
+			continue;
+
+		/* Try really hard to reserve the tag storage. */
+		ret = cma_alloc(cma, range_len(tag_range), 8, true);
+		/*
+		 * Tag storage is still in use for data, memory and/or tag
+		 * corruption will ensue.
+		 */
+		WARN_ON_ONCE(ret);
+	}
+	num_tag_regions = 0;
+	pr_info("MTE tag storage region management disabled");
+
+	return -EINVAL;
+}
+arch_initcall(mte_enable_tag_storage);
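
A closing note on the detection logic: CMA areas are activated from
cma_init_reserved_areas(), a core_initcall in mm/cma.c, so by the time this
arch_initcall runs the outcome of activation is settled and the PageReserved()
test is stable. Since the same test appears in both loops, a hypothetical
helper (not something the series adds) could factor it out:

	/*
	 * Hypothetical helper: a failed activation leaves every page of the
	 * area reserved, so checking the first PFN of the tag range is
	 * enough, exactly as the patch assumes.
	 */
	static bool __init tag_region_activated(const struct tag_region *region)
	{
		return !PageReserved(pfn_to_page(region->tag_range.start));
	}

with the checks becoming "if (!tag_region_activated(&tag_regions[i]))" followed
by "goto out_disabled" in the first loop and "continue" in the second.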