diff mbox series

[RFC,v2,15/27] arm64: mte: Check that tag storage blocks are in the same zone

Message ID 20231119165721.9849-16-alexandru.elisei@arm.com (mailing list archive)
State New
Headers show
Series [RFC,v2,01/27] arm64: mte: Rework naming for tag manipulation functions | expand

Commit Message

Alexandru Elisei Nov. 19, 2023, 4:57 p.m. UTC
alloc_contig_range() requires that the requested pages are in the same
zone. Check that this is indeed the case before initializing the tag
storage blocks.

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
 arch/arm64/kernel/mte_tag_storage.c | 33 +++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

Comments

David Hildenbrand Nov. 24, 2023, 7:56 p.m. UTC | #1
On 19.11.23 17:57, Alexandru Elisei wrote:
> alloc_contig_range() requires that the requested pages are in the same
> zone. Check that this is indeed the case before initializing the tag
> storage blocks.
> 
> Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
> ---
>   arch/arm64/kernel/mte_tag_storage.c | 33 +++++++++++++++++++++++++++++
>   1 file changed, 33 insertions(+)
> 
> diff --git a/arch/arm64/kernel/mte_tag_storage.c b/arch/arm64/kernel/mte_tag_storage.c
> index 8b9bedf7575d..fd63430d4dc0 100644
> --- a/arch/arm64/kernel/mte_tag_storage.c
> +++ b/arch/arm64/kernel/mte_tag_storage.c
> @@ -265,6 +265,35 @@ void __init mte_tag_storage_init(void)
>   	}
>   }
>   
> +/* alloc_contig_range() requires all pages to be in the same zone. */
> +static int __init mte_tag_storage_check_zone(void)
> +{
> +	struct range *tag_range;
> +	struct zone *zone;
> +	unsigned long pfn;
> +	u32 block_size;
> +	int i, j;
> +
> +	for (i = 0; i < num_tag_regions; i++) {
> +		block_size = tag_regions[i].block_size;
> +		if (block_size == 1)
> +			continue;
> +
> +		tag_range = &tag_regions[i].tag_range;
> +		for (pfn = tag_range->start; pfn <= tag_range->end; pfn += block_size) {
> +			zone = page_zone(pfn_to_page(pfn));
> +			for (j = 1; j < block_size; j++) {
> +				if (page_zone(pfn_to_page(pfn + j)) != zone) {
> +					pr_err("Tag storage block pages in different zones");
> +					return -EINVAL;
> +				}
> +			}
> +		}
> +	}
> +
> +	 return 0;
> +}
> +

Looks like something that ordinary CMA provides. See cma_activate_area().

Can't we find a way to let CMA do CMA thingies and only be a user of 
that? What would be required to make the performance issue you spelled 
out in the cover letter be gone and not have to open-code that in arch code?
Alexandru Elisei Nov. 27, 2023, 3:10 p.m. UTC | #2
Hi,

On Fri, Nov 24, 2023 at 08:56:59PM +0100, David Hildenbrand wrote:
> On 19.11.23 17:57, Alexandru Elisei wrote:
> > alloc_contig_range() requires that the requested pages are in the same
> > zone. Check that this is indeed the case before initializing the tag
> > storage blocks.
> > 
> > Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
> > ---
> >   arch/arm64/kernel/mte_tag_storage.c | 33 +++++++++++++++++++++++++++++
> >   1 file changed, 33 insertions(+)
> > 
> > diff --git a/arch/arm64/kernel/mte_tag_storage.c b/arch/arm64/kernel/mte_tag_storage.c
> > index 8b9bedf7575d..fd63430d4dc0 100644
> > --- a/arch/arm64/kernel/mte_tag_storage.c
> > +++ b/arch/arm64/kernel/mte_tag_storage.c
> > @@ -265,6 +265,35 @@ void __init mte_tag_storage_init(void)
> >   	}
> >   }
> > +/* alloc_contig_range() requires all pages to be in the same zone. */
> > +static int __init mte_tag_storage_check_zone(void)
> > +{
> > +	struct range *tag_range;
> > +	struct zone *zone;
> > +	unsigned long pfn;
> > +	u32 block_size;
> > +	int i, j;
> > +
> > +	for (i = 0; i < num_tag_regions; i++) {
> > +		block_size = tag_regions[i].block_size;
> > +		if (block_size == 1)
> > +			continue;
> > +
> > +		tag_range = &tag_regions[i].tag_range;
> > +		for (pfn = tag_range->start; pfn <= tag_range->end; pfn += block_size) {
> > +			zone = page_zone(pfn_to_page(pfn));
> > +			for (j = 1; j < block_size; j++) {
> > +				if (page_zone(pfn_to_page(pfn + j)) != zone) {
> > +					pr_err("Tag storage block pages in different zones");
> > +					return -EINVAL;
> > +				}
> > +			}
> > +		}
> > +	}
> > +
> > +	 return 0;
> > +}
> > +
> 
> Looks like something that ordinary CMA provides. See cma_activate_area().

Indeed.

> 
> Can't we find a way to let CMA do CMA thingies and only be a user of that?
> What would be required to make the performance issue you spelled out in the
> cover letter be gone and not have to open-code that in arch code?

I've replied with a possible solution here [1].

[1] https://lore.kernel.org/all/ZWSvMYMjFLFZ-abv@raptor/

Thanks,
Alex

> 
> -- 
> Cheers,
> 
> David / dhildenb
>
Hyesoo Yu Nov. 29, 2023, 8:57 a.m. UTC | #3
On Sun, Nov 19, 2023 at 04:57:09PM +0000, Alexandru Elisei wrote:
> alloc_contig_range() requires that the requested pages are in the same
> zone. Check that this is indeed the case before initializing the tag
> storage blocks.
> 
> Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
> ---
>  arch/arm64/kernel/mte_tag_storage.c | 33 +++++++++++++++++++++++++++++
>  1 file changed, 33 insertions(+)
> 
> diff --git a/arch/arm64/kernel/mte_tag_storage.c b/arch/arm64/kernel/mte_tag_storage.c
> index 8b9bedf7575d..fd63430d4dc0 100644
> --- a/arch/arm64/kernel/mte_tag_storage.c
> +++ b/arch/arm64/kernel/mte_tag_storage.c
> @@ -265,6 +265,35 @@ void __init mte_tag_storage_init(void)
>  	}
>  }
>  
> +/* alloc_contig_range() requires all pages to be in the same zone. */
> +static int __init mte_tag_storage_check_zone(void)
> +{
> +	struct range *tag_range;
> +	struct zone *zone;
> +	unsigned long pfn;
> +	u32 block_size;
> +	int i, j;
> +
> +	for (i = 0; i < num_tag_regions; i++) {
> +		block_size = tag_regions[i].block_size;
> +		if (block_size == 1)
> +			continue;
> +
> +		tag_range = &tag_regions[i].tag_range;
> +		for (pfn = tag_range->start; pfn <= tag_range->end; pfn += block_size) {
> +			zone = page_zone(pfn_to_page(pfn));

Hello.

Since the blocks within the tag_range must all be in the same zone, can we move the "page_zone"
out of the loop ?

Thanks,
Regards.

> +			for (j = 1; j < block_size; j++) {
> +				if (page_zone(pfn_to_page(pfn + j)) != zone) {
> +					pr_err("Tag storage block pages in different zones");
> +					return -EINVAL;
> +				}
> +			}
> +		}
> +	}
> +
> +	 return 0;
> +}
> +
>  static int __init mte_tag_storage_activate_regions(void)
>  {
>  	phys_addr_t dram_start, dram_end;
> @@ -321,6 +350,10 @@ static int __init mte_tag_storage_activate_regions(void)
>  		goto out_disabled;
>  	}
>  
> +	ret = mte_tag_storage_check_zone();
> +	if (ret)
> +		goto out_disabled;
> +
>  	for (i = 0; i < num_tag_regions; i++) {
>  		tag_range = &tag_regions[i].tag_range;
>  		for (pfn = tag_range->start; pfn <= tag_range->end; pfn += pageblock_nr_pages)
> -- 
> 2.42.1
> 
>
Alexandru Elisei Nov. 30, 2023, noon UTC | #4
Hi,

On Wed, Nov 29, 2023 at 05:57:44PM +0900, Hyesoo Yu wrote:
> On Sun, Nov 19, 2023 at 04:57:09PM +0000, Alexandru Elisei wrote:
> > alloc_contig_range() requires that the requested pages are in the same
> > zone. Check that this is indeed the case before initializing the tag
> > storage blocks.
> > 
> > Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
> > ---
> >  arch/arm64/kernel/mte_tag_storage.c | 33 +++++++++++++++++++++++++++++
> >  1 file changed, 33 insertions(+)
> > 
> > diff --git a/arch/arm64/kernel/mte_tag_storage.c b/arch/arm64/kernel/mte_tag_storage.c
> > index 8b9bedf7575d..fd63430d4dc0 100644
> > --- a/arch/arm64/kernel/mte_tag_storage.c
> > +++ b/arch/arm64/kernel/mte_tag_storage.c
> > @@ -265,6 +265,35 @@ void __init mte_tag_storage_init(void)
> >  	}
> >  }
> >  
> > +/* alloc_contig_range() requires all pages to be in the same zone. */
> > +static int __init mte_tag_storage_check_zone(void)
> > +{
> > +	struct range *tag_range;
> > +	struct zone *zone;
> > +	unsigned long pfn;
> > +	u32 block_size;
> > +	int i, j;
> > +
> > +	for (i = 0; i < num_tag_regions; i++) {
> > +		block_size = tag_regions[i].block_size;
> > +		if (block_size == 1)
> > +			continue;
> > +
> > +		tag_range = &tag_regions[i].tag_range;
> > +		for (pfn = tag_range->start; pfn <= tag_range->end; pfn += block_size) {
> > +			zone = page_zone(pfn_to_page(pfn));
> 
> Hello.
> 
> Since the blocks within the tag_range must all be in the same zone, can we move the "page_zone"
> out of the loop ?

Hmm.. why do you say that the pages in a tag_range must be in the same
zone? I am not very familiar with how the memory management code puts pages
into zones, but I would imagine that pages in a tag range straddling the
4GB limit (so, let's say, from 3GB to 5GB) will end up in both ZONE_DMA and
ZONE_NORMAL.

Thanks,
Alex

> 
> Thanks,
> Regards.
> 
> > +			for (j = 1; j < block_size; j++) {
> > +				if (page_zone(pfn_to_page(pfn + j)) != zone) {
> > +					pr_err("Tag storage block pages in different zones");
> > +					return -EINVAL;
> > +				}
> > +			}
> > +		}
> > +	}
> > +
> > +	 return 0;
> > +}
> > +
> >  static int __init mte_tag_storage_activate_regions(void)
> >  {
> >  	phys_addr_t dram_start, dram_end;
> > @@ -321,6 +350,10 @@ static int __init mte_tag_storage_activate_regions(void)
> >  		goto out_disabled;
> >  	}
> >  
> > +	ret = mte_tag_storage_check_zone();
> > +	if (ret)
> > +		goto out_disabled;
> > +
> >  	for (i = 0; i < num_tag_regions; i++) {
> >  		tag_range = &tag_regions[i].tag_range;
> >  		for (pfn = tag_range->start; pfn <= tag_range->end; pfn += pageblock_nr_pages)
> > -- 
> > 2.42.1
> > 
> >
Hyesoo Yu Dec. 8, 2023, 5:27 a.m. UTC | #5
Hi~

On Thu, Nov 30, 2023 at 12:00:11PM +0000, Alexandru Elisei wrote:
> Hi,
> 
> On Wed, Nov 29, 2023 at 05:57:44PM +0900, Hyesoo Yu wrote:
> > On Sun, Nov 19, 2023 at 04:57:09PM +0000, Alexandru Elisei wrote:
> > > alloc_contig_range() requires that the requested pages are in the same
> > > zone. Check that this is indeed the case before initializing the tag
> > > storage blocks.
> > > 
> > > Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
> > > ---
> > >  arch/arm64/kernel/mte_tag_storage.c | 33 +++++++++++++++++++++++++++++
> > >  1 file changed, 33 insertions(+)
> > > 
> > > diff --git a/arch/arm64/kernel/mte_tag_storage.c b/arch/arm64/kernel/mte_tag_storage.c
> > > index 8b9bedf7575d..fd63430d4dc0 100644
> > > --- a/arch/arm64/kernel/mte_tag_storage.c
> > > +++ b/arch/arm64/kernel/mte_tag_storage.c
> > > @@ -265,6 +265,35 @@ void __init mte_tag_storage_init(void)
> > >  	}
> > >  }
> > >  
> > > +/* alloc_contig_range() requires all pages to be in the same zone. */
> > > +static int __init mte_tag_storage_check_zone(void)
> > > +{
> > > +	struct range *tag_range;
> > > +	struct zone *zone;
> > > +	unsigned long pfn;
> > > +	u32 block_size;
> > > +	int i, j;
> > > +
> > > +	for (i = 0; i < num_tag_regions; i++) {
> > > +		block_size = tag_regions[i].block_size;
> > > +		if (block_size == 1)
> > > +			continue;
> > > +
> > > +		tag_range = &tag_regions[i].tag_range;
> > > +		for (pfn = tag_range->start; pfn <= tag_range->end; pfn += block_size) {
> > > +			zone = page_zone(pfn_to_page(pfn));
> > 
> > Hello.
> > 
> > Since the blocks within the tag_range must all be in the same zone, can we move the "page_zone"
> > out of the loop ?
> 
> Hmm.. why do you say that the pages in a tag_range must be in the same
> zone? I am not very familiar with how the memory management code puts pages
> into zones, but I would imagine that pages in a tag range straddling the
> 4GB limit (so, let's say, from 3GB to 5GB) will end up in both ZONE_DMA and
> ZONE_NORMAL.
> 
> Thanks,
> Alex
> 

Oh, I see that reserve_tag_storage only calls alloc_contig_range in units of block_size,
I thought it could be called for the entire range the page needed at once.
(Maybe it could be a bit faster? That way the unnecessary drain and
other operations don't seem to be repeated.)

If we use the cma code when activating the tag storage, it will be an error if the
entire area of tag region is not in the same zone, so there should be a constraint
that it must be in the same zone when defining the tag region on device tree.

Thanks,
Regards.

> > 
> > Thanks,
> > Regards.
> > 
> > > +			for (j = 1; j < block_size; j++) {
> > > +				if (page_zone(pfn_to_page(pfn + j)) != zone) {
> > > +					pr_err("Tag storage block pages in different zones");
> > > +					return -EINVAL;
> > > +				}
> > > +			}
> > > +		}
> > > +	}
> > > +
> > > +	 return 0;
> > > +}
> > > +
> > >  static int __init mte_tag_storage_activate_regions(void)
> > >  {
> > >  	phys_addr_t dram_start, dram_end;
> > > @@ -321,6 +350,10 @@ static int __init mte_tag_storage_activate_regions(void)
> > >  		goto out_disabled;
> > >  	}
> > >  
> > > +	ret = mte_tag_storage_check_zone();
> > > +	if (ret)
> > > +		goto out_disabled;
> > > +
> > >  	for (i = 0; i < num_tag_regions; i++) {
> > >  		tag_range = &tag_regions[i].tag_range;
> > >  		for (pfn = tag_range->start; pfn <= tag_range->end; pfn += pageblock_nr_pages)
> > > -- 
> > > 2.42.1
> > > 
> > > 
> 
> 
>
Alexandru Elisei Dec. 11, 2023, 2:21 p.m. UTC | #6
Hi,

On Fri, Dec 08, 2023 at 02:27:39PM +0900, Hyesoo Yu wrote:
> Hi~
> 
> On Thu, Nov 30, 2023 at 12:00:11PM +0000, Alexandru Elisei wrote:
> > Hi,
> > 
> > On Wed, Nov 29, 2023 at 05:57:44PM +0900, Hyesoo Yu wrote:
> > > On Sun, Nov 19, 2023 at 04:57:09PM +0000, Alexandru Elisei wrote:
> > > > alloc_contig_range() requires that the requested pages are in the same
> > > > zone. Check that this is indeed the case before initializing the tag
> > > > storage blocks.
> > > > 
> > > > Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
> > > > ---
> > > >  arch/arm64/kernel/mte_tag_storage.c | 33 +++++++++++++++++++++++++++++
> > > >  1 file changed, 33 insertions(+)
> > > > 
> > > > diff --git a/arch/arm64/kernel/mte_tag_storage.c b/arch/arm64/kernel/mte_tag_storage.c
> > > > index 8b9bedf7575d..fd63430d4dc0 100644
> > > > --- a/arch/arm64/kernel/mte_tag_storage.c
> > > > +++ b/arch/arm64/kernel/mte_tag_storage.c
> > > > @@ -265,6 +265,35 @@ void __init mte_tag_storage_init(void)
> > > >  	}
> > > >  }
> > > >  
> > > > +/* alloc_contig_range() requires all pages to be in the same zone. */
> > > > +static int __init mte_tag_storage_check_zone(void)
> > > > +{
> > > > +	struct range *tag_range;
> > > > +	struct zone *zone;
> > > > +	unsigned long pfn;
> > > > +	u32 block_size;
> > > > +	int i, j;
> > > > +
> > > > +	for (i = 0; i < num_tag_regions; i++) {
> > > > +		block_size = tag_regions[i].block_size;
> > > > +		if (block_size == 1)
> > > > +			continue;
> > > > +
> > > > +		tag_range = &tag_regions[i].tag_range;
> > > > +		for (pfn = tag_range->start; pfn <= tag_range->end; pfn += block_size) {
> > > > +			zone = page_zone(pfn_to_page(pfn));
> > > 
> > > Hello.
> > > 
> > > Since the blocks within the tag_range must all be in the same zone, can we move the "page_zone"
> > > out of the loop ?
> > 
> > Hmm.. why do you say that the pages in a tag_range must be in the same
> > zone? I am not very familiar with how the memory management code puts pages
> > into zones, but I would imagine that pages in a tag range straddling the
> > 4GB limit (so, let's say, from 3GB to 5GB) will end up in both ZONE_DMA and
> > ZONE_NORMAL.
> > 
> > Thanks,
> > Alex
> > 
> 
> Oh, I see that reserve_tag_storage only calls alloc_contig_range in units of block_size,
> I thought it could be called for the entire range the page needed at once.
> (Maybe it could be a bit faster ? It doesn't seem like unnecessary drain and
> other operation are repeated.)

Yes, that might be useful to do. Worth keeping in mind is that:

- a number of block size pages at the start and end of the range might
  already be reserved for other tagged pages, so the actual range that is
  being reserved might end up being smaller than what we are expecting.

- the most common allocation order is smaller or equal to
  PAGE_ALLOC_COSTLY_ORDER, which is 3, which means that the most common
  case is that reserve_tag_storage reserves only one tag storage block.

I will definitely keep this optimization in mind, but I would prefer to get
the series into a more stable shape before looking at performance
optimizations.

> 
> If we use the cma code when activating the tag storage, it will be an error if the
> entire area of tag region is not in the same zone, so there should be a constraint
> that it must be in the same zone when defining the tag region on device tree.

I don't think that's the best approach, because the device tree describes
the hardware, which does not change, and this is a software limitation
(i.e., CMA doesn't work if a CMA region spans different zones), which might
get fixed in a future version of Linux.

In my opinion, the simplest solution would be to check that all tag storage
regions have been activated successfully by CMA before enabling tag
storage. Another alternative would be to split the tag storage region into
several CMA regions at a zone boundary, and add it as distinct CMA regions.

Thanks,
Alex

> 
> Thanks,
> Regards.
> 
> > > 
> > > Thanks,
> > > Regards.
> > > 
> > > > +			for (j = 1; j < block_size; j++) {
> > > > +				if (page_zone(pfn_to_page(pfn + j)) != zone) {
> > > > +					pr_err("Tag storage block pages in different zones");
> > > > +					return -EINVAL;
> > > > +				}
> > > > +			}
> > > > +		}
> > > > +	}
> > > > +
> > > > +	 return 0;
> > > > +}
> > > > +
> > > >  static int __init mte_tag_storage_activate_regions(void)
> > > >  {
> > > >  	phys_addr_t dram_start, dram_end;
> > > > @@ -321,6 +350,10 @@ static int __init mte_tag_storage_activate_regions(void)
> > > >  		goto out_disabled;
> > > >  	}
> > > >  
> > > > +	ret = mte_tag_storage_check_zone();
> > > > +	if (ret)
> > > > +		goto out_disabled;
> > > > +
> > > >  	for (i = 0; i < num_tag_regions; i++) {
> > > >  		tag_range = &tag_regions[i].tag_range;
> > > >  		for (pfn = tag_range->start; pfn <= tag_range->end; pfn += pageblock_nr_pages)
> > > > -- 
> > > > 2.42.1
> > > > 
> > > > 
> > 
> > 
> >
diff mbox series

Patch

diff --git a/arch/arm64/kernel/mte_tag_storage.c b/arch/arm64/kernel/mte_tag_storage.c
index 8b9bedf7575d..fd63430d4dc0 100644
--- a/arch/arm64/kernel/mte_tag_storage.c
+++ b/arch/arm64/kernel/mte_tag_storage.c
@@ -265,6 +265,35 @@  void __init mte_tag_storage_init(void)
 	}
 }
 
+/* alloc_contig_range() requires all pages to be in the same zone. */
+static int __init mte_tag_storage_check_zone(void)
+{
+	struct range *tag_range;
+	struct zone *zone;
+	unsigned long pfn;
+	u32 block_size;
+	int i, j;
+
+	for (i = 0; i < num_tag_regions; i++) {
+		block_size = tag_regions[i].block_size;
+		if (block_size == 1)
+			continue;
+
+		tag_range = &tag_regions[i].tag_range;
+		for (pfn = tag_range->start; pfn <= tag_range->end; pfn += block_size) {
+			zone = page_zone(pfn_to_page(pfn));
+			for (j = 1; j < block_size; j++) {
+				if (page_zone(pfn_to_page(pfn + j)) != zone) {
+					pr_err("Tag storage block pages in different zones");
+					return -EINVAL;
+				}
+			}
+		}
+	}
+
+	 return 0;
+}
+
 static int __init mte_tag_storage_activate_regions(void)
 {
 	phys_addr_t dram_start, dram_end;
@@ -321,6 +350,10 @@  static int __init mte_tag_storage_activate_regions(void)
 		goto out_disabled;
 	}
 
+	ret = mte_tag_storage_check_zone();
+	if (ret)
+		goto out_disabled;
+
 	for (i = 0; i < num_tag_regions; i++) {
 		tag_range = &tag_regions[i].tag_range;
 		for (pfn = tag_range->start; pfn <= tag_range->end; pfn += pageblock_nr_pages)